Commit 63042c58 authored by Zengruan Ye, committed by Zheng Zengkai

KVM: arm64: Add interface to support vCPU preempted check

virt inclusion
category: feature
bugzilla: 47624
CVE: NA

--------------------------------

This fixes some lock holder preemption issues. Some lock implementations
spin in a loop before acquiring the lock itself. The kernel already
provides the interface bool vcpu_is_preempted(int cpu), which takes a CPU
number and returns true if the vCPU running on that CPU has been
preempted. The kernel can then break out of such spin loops based on the
return value of vcpu_is_preempted().

Since the kernel already uses this interface, let's support it on arm64.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: Zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 76732c97
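As background for the change below, this is roughly how a lock-side spin
loop uses the interface the commit message describes. This is a sketch
only, not code from this patch: the example_lock type, its owner_cpu
field and try_acquire() are invented for illustration.

/*
 * Sketch only, not part of this patch: a spinning waiter that gives up
 * early when the lock holder's vCPU has been preempted.
 */
static bool spin_for_lock(struct example_lock *lock)
{
	while (!try_acquire(lock)) {
		int owner_cpu = READ_ONCE(lock->owner_cpu);

		/*
		 * If the holder is not actually running, spinning only
		 * burns cycles; back off and block instead.
		 */
		if (owner_cpu >= 0 && vcpu_is_preempted(owner_cpu))
			return false;

		if (need_resched())
			return false;

		cpu_relax();
	}
	return true;
}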
@@ -11,8 +11,13 @@ struct pv_time_ops {
 	unsigned long long (*steal_clock)(int cpu);
 };
 
+struct pv_sched_ops {
+	bool (*vcpu_is_preempted)(int cpu);
+};
+
 struct paravirt_patch_template {
 	struct pv_time_ops time;
+	struct pv_sched_ops sched;
 };
 
 extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,12 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+__visible bool __native_vcpu_is_preempted(int cpu);
+
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+	return pv_ops.sched.vcpu_is_preempted(cpu);
+}
 #else
 
 #define pv_time_init() do {} while (0)
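The two hunks above add a pv_sched_ops slot to the pv_ops table and a
pv_vcpu_is_preempted() wrapper around it, so a guest can swap in its own
check at boot. A later patch in this series would typically register a
hypervisor-specific callback; a rough sketch follows, with function names
that are hypothetical and not taken from this patch.

/*
 * Hypothetical guest-side registration (sketch only, names invented):
 * replace the native stub with a callback that consults preemption
 * state shared by the host, e.g. via a per-vCPU memory region.
 */
static bool example_guest_vcpu_is_preempted(int cpu)
{
	/* Placeholder: would read host-provided state for this vCPU. */
	return false;
}

static int __init example_pv_sched_init(void)
{
	pv_ops.sched.vcpu_is_preempted = example_guest_vcpu_is_preempted;
	return 0;
}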
@@ -7,6 +7,7 @@
 
 #include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
@@ -19,9 +20,18 @@
  * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
  */
 #define vcpu_is_preempted vcpu_is_preempted
+#ifdef CONFIG_PARAVIRT
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+#else
 static inline bool vcpu_is_preempted(int cpu)
 {
 	return false;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #endif /* __ASM_SPINLOCK_H */
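The "#define vcpu_is_preempted vcpu_is_preempted" line lets the generic
scheduler header detect that the architecture supplies its own
implementation. For reference, the generic fallback in
include/linux/sched.h looks roughly like the following; this is
paraphrased and not part of this patch.

/*
 * Roughly what include/linux/sched.h provides (not part of this patch):
 * the always-false fallback is only used when the architecture has not
 * defined vcpu_is_preempted itself.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif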
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
 obj-$(CONFIG_ACPI)			+= acpi.o
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt.o paravirt-spinlocks.o
 obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2019 Huawei Technologies Co., Ltd
* Author: Zengruan Ye <yezengruan@huawei.com>
*/

#include <linux/spinlock.h>
#include <asm/paravirt.h>

__visible bool __native_vcpu_is_preempted(int cpu)
{
	return false;
}
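This native stub is the default target of the hook: a physical CPU is
never preempted by a hypervisor, so it simply reports false. Below is a
minimal caller-side sketch, not part of the patch, of the default
behaviour once the pieces above are wired together.

/*
 * Sketch only: with CONFIG_PARAVIRT=y and no hypervisor backend
 * installed, vcpu_is_preempted() resolves through pv_vcpu_is_preempted()
 * and pv_ops.sched.vcpu_is_preempted to __native_vcpu_is_preempted(),
 * so the check below is always false.
 */
#include <linux/printk.h>
#include <linux/spinlock.h>

static void example_check_holder(int holder_cpu)
{
	if (vcpu_is_preempted(holder_cpu))
		pr_debug("cpu %d looks preempted, stop spinning\n", holder_cpu);
}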
@@ -26,7 +26,9 @@
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+	.sched.vcpu_is_preempted	= __native_vcpu_is_preempted,
+};
 EXPORT_SYMBOL_GPL(pv_ops);
 
 struct pv_time_stolen_time_region {