Commit ad26870d authored by Zengruan Ye, committed by Cheng Jian

KVM: arm64: Support the vCPU preemption check

euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35
CVE: NA

--------------------------------

Support the vcpu_is_preempted() functionality under KVM/arm64. This will
enhance lock performance on overcommitted hosts (more runnable vCPUs
than physical CPUs in the system) as doing busy waits for preempted
vCPUs will hurt system performance far worse than early yielding.
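
For context, the generic locking code consults vcpu_is_preempted() during optimistic spinning. The sketch below is illustrative only, not part of this patch: it is modeled loosely on mutex_spin_on_owner() in kernel/locking/mutex.c, and owner_still_holds_lock() is a hypothetical stand-in for the real owner check.

  /*
   * Illustrative sketch only (not part of this patch): a spin-wait
   * loop that bails out early once the lock holder's vCPU has been
   * scheduled out by the host. owner_still_holds_lock() is a
   * hypothetical helper.
   */
  static bool spin_on_owner(struct task_struct *owner)
  {
          while (owner_still_holds_lock(owner)) {
                  /*
                   * A preempted vCPU cannot release the lock until the
                   * host runs it again, so yield instead of burning
                   * cycles.
                   */
                  if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
                          return false;
                  cpu_relax();
          }
          return true;
  }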

UnixBench result:
  host:  kernel 4.19.87, HiSilicon Kunpeng920, 8 CPUs
  guest: kernel 4.19.87, 16 vCPUs

               test-case                |    after-patch    |   before-patch
----------------------------------------+-------------------+------------------
 Dhrystone 2 using register variables   | 338955728.5 lps   | 339266319.5 lps
 Double-Precision Whetstone             |     30634.9 MWIPS |     30884.4 MWIPS
 Execl Throughput                       |      6753.2 lps   |      3580.1 lps
 File Copy 1024 bufsize 2000 maxblocks  |    490048.0 KBps  |    313282.3 KBps
 File Copy 256 bufsize 500 maxblocks    |    129662.5 KBps  |     83550.7 KBps
 File Copy 4096 bufsize 8000 maxblocks  |   1552551.5 KBps  |    814327.0 KBps
 Pipe Throughput                        |   8976422.5 lps   |   9048628.4 lps
 Pipe-based Context Switching           |    258641.7 lps   |    252925.9 lps
 Process Creation                       |      5312.2 lps   |      4507.9 lps
 Shell Scripts (1 concurrent)           |      8704.2 lpm   |      6720.9 lpm
 Shell Scripts (8 concurrent)           |      1708.8 lpm   |       607.2 lpm
 System Call Overhead                   |   3714444.7 lps   |   3746386.8 lps
----------------------------------------+-------------------+------------------
 System Benchmarks Index Score          |      2270.6       |      1679.2

Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Chaochao Xing <xingchaochao@huawei.com>
Reviewed-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Xiangyou Xie <xiexiangyou@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 430b95e6
@@ -27,12 +27,18 @@ static inline u64 paravirt_steal_clock(int cpu)
 	return pv_time_ops.steal_clock(cpu);
 }
 
+int __init pv_sched_init(void);
+
 __visible bool __native_vcpu_is_preempted(int cpu);
 
 static inline bool pv_vcpu_is_preempted(int cpu)
 {
 	return pv_ops.sched.vcpu_is_preempted(cpu);
 }
 
-#endif
+#else
+
+#define pv_sched_init() do {} while (0)
+
+#endif /* CONFIG_PARAVIRT */
 #endif
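
For context, the arch-level hook that routes the generic vcpu_is_preempted() check into pv_ops was added by the parent stub commit (430b95e6) and is not shown in this diff; it looks roughly like this:

  /* From the parent stub commit (roughly): the arch override of the
   * generic vcpu_is_preempted() hook, dispatching into pv_ops.sched. */
  #define vcpu_is_preempted vcpu_is_preempted
  static inline bool vcpu_is_preempted(int cpu)
  {
          return pv_vcpu_is_preempted(cpu);
  }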
@@ -13,10 +13,18 @@
  * Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
  */
 
+#define pr_fmt(fmt) "arm-pv: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
 #include <linux/export.h>
+#include <linux/io.h>
 #include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
 #include <linux/types.h>
 #include <asm/paravirt.h>
+#include <asm/pvsched-abi.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;

@@ -28,3 +36,107 @@ struct paravirt_patch_template pv_ops = {
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL_GPL(pv_ops);
+
+DEFINE_PER_CPU(struct pvsched_vcpu_state, pvsched_vcpu_region) __aligned(64);
+EXPORT_PER_CPU_SYMBOL(pvsched_vcpu_region);
+
+static bool kvm_vcpu_is_preempted(int cpu)
+{
+	struct pvsched_vcpu_state *reg;
+	u32 preempted;
+
+	reg = &per_cpu(pvsched_vcpu_region, cpu);
+	if (!reg) {
+		pr_warn_once("PV sched enabled but not configured for cpu %d\n",
+			     cpu);
+		return false;
+	}
+
+	preempted = le32_to_cpu(READ_ONCE(reg->preempted));
+
+	return !!preempted;
+}
+
+static int pvsched_vcpu_state_dying_cpu(unsigned int cpu)
+{
+	struct pvsched_vcpu_state *reg;
+	struct arm_smccc_res res;
+
+	reg = this_cpu_ptr(&pvsched_vcpu_region);
+	if (!reg)
+		return -EFAULT;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE, &res);
+	memset(reg, 0, sizeof(*reg));
+
+	return 0;
+}
+
+static int init_pvsched_vcpu_state(unsigned int cpu)
+{
+	struct pvsched_vcpu_state *reg;
+	struct arm_smccc_res res;
+
+	reg = this_cpu_ptr(&pvsched_vcpu_region);
+	if (!reg)
+		return -EFAULT;
+
+	/* Pass the memory address to host via hypercall */
+	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_IPA_INIT,
+			     virt_to_phys(reg), &res);
+
+	return 0;
+}
+
+static int kvm_arm_init_pvsched(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				"hypervisor/arm/pvsched:starting",
+				init_pvsched_vcpu_state,
+				pvsched_vcpu_state_dying_cpu);
+	if (ret < 0) {
+		pr_warn("PV sched init failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static bool has_kvm_pvsched(void)
+{
+	struct arm_smccc_res res;
+
+	/* To detect the presence of PV sched support we require SMCCC 1.1+ */
+	if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
+		return false;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_HV_PV_SCHED_FEATURES, &res);
+
+	return (res.a0 == SMCCC_RET_SUCCESS);
+}
+
+int __init pv_sched_init(void)
+{
+	int ret;
+
+	if (is_hyp_mode_available())
+		return 0;
+
+	if (!has_kvm_pvsched()) {
+		pr_warn("PV sched is not available\n");
+		return 0;
+	}
+
+	ret = kvm_arm_init_pvsched();
+	if (ret)
+		return ret;
+
+	pv_ops.sched.vcpu_is_preempted = kvm_vcpu_is_preempted;
+	pr_info("using PV sched preempted\n");
+
+	return 0;
+}
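
The shared record itself comes from <asm/pvsched-abi.h>, added earlier in this series and not shown in this diff. Per the pvsched protocol it is a 64-byte, 64-byte-aligned block whose first word the host sets while the vCPU is scheduled out; roughly:

  /* Sketch of the guest/host shared record (per the pvsched series;
   * not part of this hunk). Only 'preempted' is read by
   * kvm_vcpu_is_preempted() above. */
  struct pvsched_vcpu_state {
          __le32 preempted;       /* nonzero while the vCPU is scheduled out */
          /* Structure must be 64 byte aligned, pad to that size */
          u8 padding[60];
  } __packed;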
@@ -372,6 +372,8 @@ void __init setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+	pv_sched_init();
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
 	 * Make sure init_thread_info.ttbr0 always generates translation
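
On the host side (the companion KVM patches, not shown here), the hypervisor updates the registered record around vCPU scheduling. A simplified sketch of that contract follows; the vcpu->arch.pvsched.base field name is an assumption based on the companion patches, and error handling is omitted:

  /* Host-side sketch (simplified; field names are assumptions based on
   * the companion patches): write the 'preempted' word of the record
   * the guest registered via ARM_SMCCC_HV_PV_SCHED_IPA_INIT. Called
   * with 1 from kvm_arch_vcpu_put() and 0 from kvm_arch_vcpu_load(). */
  static void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu,
                                           u32 preempted)
  {
          __le32 val = cpu_to_le32(preempted);
          u64 base = vcpu->arch.pvsched.base; /* IPA registered by guest */

          if (base == GPA_INVALID)        /* guest never registered a region */
                  return;

          kvm_write_guest(vcpu->kvm,
                          base + offsetof(struct pvsched_vcpu_state, preempted),
                          &val, sizeof(val));
  }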