From 430b95e6c6e78ed1b46d068bb3708c3b72e4e06a Mon Sep 17 00:00:00 2001 From: Zengruan Ye Date: Thu, 22 Apr 2021 20:15:53 +0800 Subject: [PATCH] KVM: arm64: Add interface to support vCPU preempted check euleros inclusion category: feature bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=35 CVE: NA -------------------------------- This is to fix some lock holder preemption issues. Some other lock implementations do a spin loop before acquiring the lock itself. Currently the kernel has an interface of bool vcpu_is_preempted(int cpu). It takes the CPU as a parameter and returns true if the CPU is preempted. Then the kernel can break the spin loops upon the retval of vcpu_is_preempted. As the kernel already uses this interface, let's support it. Signed-off-by: Zengruan Ye Reviewed-by: zhanghailiang Signed-off-by: Yang Yingliang Signed-off-by: Chaochao Xing Reviewed-by: Zengruan Ye Reviewed-by: Xiangyou Xie Signed-off-by: Cheng Jian --- arch/arm64/include/asm/paravirt.h | 17 +++++++++++++++++ arch/arm64/include/asm/spinlock.h | 10 ++++++++++ arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/paravirt-spinlocks.c | 13 +++++++++++++ arch/arm64/kernel/paravirt.c | 5 +++++ 5 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kernel/paravirt-spinlocks.c diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h index bb5dcea42003..5c250cb9a52e 100644 --- a/arch/arm64/include/asm/paravirt.h +++ b/arch/arm64/include/asm/paravirt.h @@ -10,12 +10,29 @@ extern struct static_key paravirt_steal_rq_enabled; struct pv_time_ops { unsigned long long (*steal_clock)(int cpu); }; + +struct pv_sched_ops { + bool (*vcpu_is_preempted)(int cpu); +}; + +struct paravirt_patch_template { + struct pv_sched_ops sched; +}; + extern struct pv_time_ops pv_time_ops; +extern struct paravirt_patch_template pv_ops; static inline u64 paravirt_steal_clock(int cpu) { return pv_time_ops.steal_clock(cpu); } + +__visible bool __native_vcpu_is_preempted(int cpu); +static 
inline bool pv_vcpu_is_preempted(int cpu) +{ + return pv_ops.sched.vcpu_is_preempted(cpu); +} + #endif #endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index a9dec081beca..4a668995014c 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -18,6 +18,7 @@ #include #include +#include /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() @@ -30,9 +31,18 @@ * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net */ #define vcpu_is_preempted vcpu_is_preempted +#ifdef CONFIG_PARAVIRT +static inline bool vcpu_is_preempted(int cpu) +{ + return pv_vcpu_is_preempted(cpu); +} + +#else + static inline bool vcpu_is_preempted(int cpu) { return false; } +#endif /* CONFIG_PARAVIRT */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index ec0cf9543e21..6dd565aaad32 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -51,7 +51,7 @@ arm64-obj-$(CONFIG_ACPI) += acpi.o arm64-obj-$(CONFIG_ARM64_ERR_RECOV) += ras.o arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o -arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o +arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c new file mode 100644 index 000000000000..fd733eb02d42 --- /dev/null +++ b/arch/arm64/kernel/paravirt-spinlocks.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Author: Zengruan Ye + */ + +#include +#include + +__visible bool __native_vcpu_is_preempted(int cpu) +{ + return false; +} diff --git a/arch/arm64/kernel/paravirt.c 
b/arch/arm64/kernel/paravirt.c index 53f371ed4568..d5ce2d362e6e 100644 --- a/arch/arm64/kernel/paravirt.c +++ b/arch/arm64/kernel/paravirt.c @@ -22,4 +22,9 @@ struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; struct pv_time_ops pv_time_ops; +struct paravirt_patch_template pv_ops = { + .sched.vcpu_is_preempted = __native_vcpu_is_preempted, +}; + EXPORT_SYMBOL_GPL(pv_time_ops); +EXPORT_SYMBOL_GPL(pv_ops); -- GitLab