Commit 4aeb7b02 authored by Zengruan Ye, committed by Zheng Zengkai

KVM: arm64: Enable PV qspinlock

virt inclusion
category: feature
bugzilla: 47624
CVE: NA

--------------------------------

Linux kernel builds were run in a KVM guest on a HiSilicon Kunpeng 920
system. VM guests were set up with 32, 48 and 60 vCPUs on the 32 physical
CPUs. The kernel build (make -j<n>, where <n> is the number of vCPUs
available) was run 3 times in a VM with unpinned vCPUs, and the best time
was selected. The build times of the original Linux 4.19.87 and of
pvqspinlock with various numbers of vCPUs are as follows:

  Kernel        32 vCPUs    48 vCPUs    60 vCPUs
  ----------    --------    --------    --------
  4.19.87       342.336s    602.048s    950.340s
  pvqspinlock   341.366s    376.135s    437.037s
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 7560103e
@@ -44,6 +44,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
void __init pv_qspinlock_init(void);
bool pv_is_native_spin_unlock(void);
static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
@@ -64,6 +65,10 @@ static inline void pv_kick(int cpu)
{
	return pv_ops.sched.kick(cpu);
}

#else
#define pv_qspinlock_init() do {} while (0)
#endif /* SMP && PARAVIRT_SPINLOCKS */

#else
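The hunk above is the header side; the hunks below are the setup code. For readers unfamiliar with the pv_ops pattern both rely on, here is a minimal, self-contained user-space sketch (not kernel code; names such as pv_sched_ops, native_kick and pv_kick_demo are made up for illustration) of how a table of function pointers lets the lock slow path be rewired at boot:

#include <stdio.h>

/* Toy stand-in for the kernel's pv_ops.sched table: a struct of function
 * pointers that early boot code can repoint at PV-aware implementations. */
struct pv_sched_ops {
	void (*kick)(int cpu);
};

/* Default (native) implementation: nothing special on bare metal. */
static void native_kick(int cpu)
{
	printf("native: nothing to do for cpu %d\n", cpu);
}

/* PV implementation: in the kernel this would be an SMCCC hypercall. */
static void pv_kick_demo(int cpu)
{
	printf("pv: hypercall to wake vcpu %d\n", cpu);
}

static struct pv_sched_ops demo_ops = { .kick = native_kick };

/* Callers always dispatch through the table, like pv_kick() in the header. */
static void demo_pv_kick(int cpu)
{
	demo_ops.kick(cpu);
}

int main(void)
{
	demo_pv_kick(0);              /* native path */
	demo_ops.kick = pv_kick_demo; /* boot-time rewiring, as pv_qspinlock_init() does */
	demo_pv_kick(0);              /* PV path */
	return 0;
}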
@@ -22,6 +22,7 @@
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
#include <asm/pvsched-abi.h>
#include <asm/qspinlock_paravirt.h>
#include <asm/smp_plat.h>

struct static_key paravirt_steal_enabled;
@@ -251,6 +252,63 @@ static bool has_kvm_pvsched(void)
	return (res.a0 == SMCCC_RET_SUCCESS);
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
static bool arm_pvspin = false;

/* Kick a cpu by its cpuid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_KICK_CPU, cpu, &res);
}
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	dsb(sy);
	wfi();

out:
	local_irq_restore(flags);
}
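kvm_wait() is the sleep half of a wait/kick handshake: it re-checks the lock byte with interrupts masked before executing WFI, so a kick that arrives between the check and the WFI is seen as a pending wakeup rather than lost; kvm_kick_cpu() above is the wake half. Below is a user-space sketch of the same check-then-sleep pattern, with a mutex/condvar standing in for IRQ masking and WFI (demo_wait, demo_kick and lock_word are hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static unsigned char lock_word;  /* emulated lock byte */

/* Analogue of kvm_wait(): sleep only while the byte still holds 'val',
 * re-checking under the mutex so a concurrent kick cannot be missed. */
static void demo_wait(unsigned char val)
{
	pthread_mutex_lock(&m);              /* ~ local_irq_save() */
	while (lock_word == val)             /* ~ READ_ONCE(*ptr) check */
		pthread_cond_wait(&cv, &m);  /* ~ dsb(sy); wfi() */
	pthread_mutex_unlock(&m);            /* ~ local_irq_restore() */
}

/* Analogue of kvm_kick_cpu(): change the byte and wake the sleeper. */
static void *demo_kick(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	lock_word = 1;
	pthread_cond_signal(&cv);
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_kick, NULL);
	demo_wait(0);           /* returns once kicked */
	pthread_join(t, NULL);
	printf("woken\n");
	return 0;
}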
void __init pv_qspinlock_init(void)
{
	/* Don't use the PV qspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		arm_pvspin = false;

	if (!arm_pvspin) {
		pr_info("PV qspinlocks disabled\n");
		return;
	}
	pr_info("PV qspinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
	pv_ops.sched.wait = kvm_wait;
	pv_ops.sched.kick = kvm_kick_cpu;
}
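__pv_init_lock_hash() allocates the hash table the generic PV slow path uses: a vCPU about to sleep publishes "this CPU waits on this lock", and the PV unlock path looks the lock up to decide which vCPU to kick. A toy, single-threaded illustration of that publish/lookup idea (hypothetical names, no locking, fixed table size):

#include <stdio.h>

#define HASH_SIZE 16  /* toy size; the kernel sizes its table from the CPU count */

struct demo_lock { int locked; };

/* Toy open-addressed table mapping a lock to the CPU waiting at its head. */
static struct { struct demo_lock *lock; int cpu; } hash_tbl[HASH_SIZE];

static unsigned int hash(struct demo_lock *l)
{
	return ((unsigned long)l >> 4) % HASH_SIZE;
}

/* ~ publish before sleeping: record "cpu waits on lock". */
static void demo_hash(struct demo_lock *l, int cpu)
{
	unsigned int i = hash(l);

	while (hash_tbl[i].lock)
		i = (i + 1) % HASH_SIZE;
	hash_tbl[i].lock = l;
	hash_tbl[i].cpu = cpu;
}

/* ~ lookup at unlock time: find which cpu to kick. */
static int demo_unhash(struct demo_lock *l)
{
	unsigned int i = hash(l);

	while (hash_tbl[i].lock != l)
		i = (i + 1) % HASH_SIZE;
	hash_tbl[i].lock = NULL;
	return hash_tbl[i].cpu;
}

int main(void)
{
	struct demo_lock lk = { 1 };

	demo_hash(&lk, 3);                          /* vCPU 3 parks on lk */
	printf("kick cpu %d\n", demo_unhash(&lk));  /* unlocker kicks vCPU 3 */
	return 0;
}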
static __init int arm_parse_pvspin(char *arg)
{
	arm_pvspin = true;
	return 0;
}
early_param("arm_pvspin", arm_parse_pvspin);
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
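Note that the PV slow path is strictly opt-in: as the early_param() above shows, it stays off unless the guest kernel is booted with arm_pvspin on its command line, and pv_qspinlock_init() forces it off again on single-vCPU guests. A guest command line enabling it might look like the following (the other parameters are placeholders):

  console=ttyAMA0 root=/dev/vda2 arm_pvspin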
int __init pv_sched_init(void)
{
	int ret;

@@ -270,5 +328,7 @@ int __init pv_sched_init(void)
	pv_ops.sched.vcpu_is_preempted = kvm_vcpu_is_preempted;
	pr_info("using PV sched preempted\n");

	pv_qspinlock_init();

	return 0;
}