Commit 430b95e6, authored by Zengruan Ye, committed by Cheng Jian

KVM: arm64: Add interface to support vCPU preempted check

euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35
CVE: NA

--------------------------------

This is to fix some lock holder preemption issues. Some lock
implementations do a spin loop before acquiring the lock itself.
The kernel currently has an interface, bool vcpu_is_preempted(int cpu),
which takes a CPU number as its parameter and returns true if the vCPU
running on that CPU has been preempted. The kernel can then break out of
such spin loops based on the return value of vcpu_is_preempted().

Since the kernel already uses this interface, let's support it on arm64.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Chaochao Xing <xingchaochao@huawei.com>
Reviewed-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Xiangyou Xie <xiexiangyou@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 5b727b32
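As background (not part of the patch itself), the sketch below shows the kind of optimistic spin loop the commit message refers to: a waiter polls the lock but gives up as soon as the lock holder's vCPU is reported as preempted. The lock layout, the demo_optimistic_spin() name, and the way the holder CPU is tracked are illustrative assumptions, not code taken from the kernel.

#include <linux/atomic.h>
#include <linux/processor.h>    /* cpu_relax() via asm/processor.h */
#include <linux/sched.h>        /* generic vcpu_is_preempted() fallback */
#include <linux/spinlock.h>     /* pulls in the arch override from asm/spinlock.h */

/*
 * Illustrative only, not part of this patch: a waiter spins on 'locked'
 * but stops as soon as the vCPU of the lock holder (running on
 * 'holder_cpu') is reported as preempted by the host.
 */
static bool demo_optimistic_spin(atomic_t *locked, int holder_cpu)
{
        while (atomic_read(locked)) {
                if (vcpu_is_preempted(holder_cpu))
                        return false;   /* holder preempted: take the slow path */
                cpu_relax();
        }
        return true;    /* lock became free while we were spinning */
}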
@@ -10,12 +10,29 @@ extern struct static_key paravirt_steal_rq_enabled;
struct pv_time_ops {
        unsigned long long (*steal_clock)(int cpu);
};

struct pv_sched_ops {
        bool (*vcpu_is_preempted)(int cpu);
};

struct paravirt_patch_template {
        struct pv_sched_ops sched;
};

extern struct pv_time_ops pv_time_ops;
extern struct paravirt_patch_template pv_ops;

static inline u64 paravirt_steal_clock(int cpu)
{
        return pv_time_ops.steal_clock(cpu);
}

__visible bool __native_vcpu_is_preempted(int cpu);

static inline bool pv_vcpu_is_preempted(int cpu)
{
        return pv_ops.sched.vcpu_is_preempted(cpu);
}

#endif

#endif
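For context on how this hook is meant to be consumed (again, not part of this patch): since pv_ops is a writable, exported structure, a later guest-side patch could install its own check in place of the native stub. The demo_kvm_vcpu_is_preempted() and demo_pv_sched_init() names below are hypothetical placeholders.

#include <linux/init.h>
#include <asm/paravirt.h>

/*
 * Hypothetical guest-side check: a real backend would consult state
 * shared with the host instead of returning a constant.
 */
static bool demo_kvm_vcpu_is_preempted(int cpu)
{
        return false;
}

static int __init demo_pv_sched_init(void)
{
        /* Replace the native stub installed by this patch. */
        pv_ops.sched.vcpu_is_preempted = demo_kvm_vcpu_is_preempted;
        return 0;
}
early_initcall(demo_pv_sched_init);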
@@ -18,6 +18,7 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
#include <asm/paravirt.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()

@@ -30,9 +31,18 @@
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
#define vcpu_is_preempted vcpu_is_preempted
#ifdef CONFIG_PARAVIRT
static inline bool vcpu_is_preempted(int cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#else
static inline bool vcpu_is_preempted(int cpu)
{
        return false;
}
#endif /* CONFIG_PARAVIRT */

#endif /* __ASM_SPINLOCK_H */
@@ -51,7 +51,7 @@ arm64-obj-$(CONFIG_ACPI) += acpi.o
arm64-obj-$(CONFIG_ARM64_ERR_RECOV) += ras.o
arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2019 Huawei Technologies Co., Ltd
 * Author: Zengruan Ye <yezengruan@huawei.com>
 */

#include <linux/spinlock.h>
#include <asm/paravirt.h>

__visible bool __native_vcpu_is_preempted(int cpu)
{
        return false;
}
@@ -22,4 +22,9 @@ struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

struct pv_time_ops pv_time_ops;
struct paravirt_patch_template pv_ops = {
        .sched.vcpu_is_preempted = __native_vcpu_is_preempted,
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL_GPL(pv_ops);