Commit 503033c4 authored by Zengruan Ye, committed by Yang Yingliang

KVM: arm64: Add interface to support PV qspinlock

euleros inclusion
category: feature
bugzilla: NA
CVE: NA

--------------------------------

The kernel already uses this interface, so let's support it.
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 8291f58e
@@ -817,6 +817,7 @@ config NODES_SHIFT
 config NUMA_AWARE_SPINLOCKS
     bool "Numa-aware spinlocks"
     depends on NUMA && QUEUED_SPINLOCKS
+    depends on PARAVIRT_SPINLOCKS
     default n
     help
       Introduce NUMA (Non Uniform Memory Access) awareness into
@@ -901,6 +902,19 @@ config PARAVIRT
       under a hypervisor, potentially improving performance significantly
       over full virtualization.
 
+config PARAVIRT_SPINLOCKS
+    bool "Paravirtualization layer for spinlocks"
+    depends on PARAVIRT && SMP
+    help
+      Paravirtualized spinlocks allow a pvops backend to replace the
+      spinlock implementation with something virtualization-friendly
+      (for example, block the virtual CPU rather than spinning).
+
+      It has a minimal impact on native kernels and gives a nice performance
+      benefit on paravirtualized KVM kernels.
+
+      If you are unsure how to answer this question, answer Y.
+
 config PARAVIRT_TIME_ACCOUNTING
     bool "Paravirtual steal time accounting"
     select PARAVIRT
......
@@ -12,6 +12,12 @@ struct pv_time_ops {
 };
 
 struct pv_sched_ops {
+    void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+    void (*queued_spin_unlock)(struct qspinlock *lock);
+
+    void (*wait)(u8 *ptr, u8 val);
+    void (*kick)(int cpu);
+
     bool (*vcpu_is_preempted)(int cpu);
 };
@@ -35,6 +41,29 @@ static inline bool pv_vcpu_is_preempted(int cpu)
     return pv_ops.sched.vcpu_is_preempted(cpu);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+bool pv_is_native_spin_unlock(void);
+
+static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+    return pv_ops.sched.queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+    return pv_ops.sched.queued_spin_unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+    return pv_ops.sched.wait(ptr, val);
+}
+
+static inline void pv_kick(int cpu)
+{
+    return pv_ops.sched.kick(cpu);
+}
+#endif /* SMP && PARAVIRT_SPINLOCKS */
+
 #else
 
 #define pv_sched_init() do {} while (0)
......
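Note (illustrative, not part of this patch): the new pv_ops.sched hooks default to the native implementations, and a guest-side backend is expected to override them at boot, much as the x86 KVM guest code does. A minimal sketch of such an init path follows; the helper names kvm_wait(), kvm_kick_cpu() and pv_lock_init() are hypothetical placeholders, not functions added by this commit.

```c
/* Hypothetical sketch: how a PV backend could install its spinlock callbacks. */
static void kvm_wait(u8 *ptr, u8 val)
{
    /* Park this vCPU in the hypervisor until *ptr no longer equals val. */
}

static void kvm_kick_cpu(int cpu)
{
    /* Ask the hypervisor to wake the vCPU parked in kvm_wait(). */
}

void __init pv_lock_init(void)
{
    /* Detection of hypervisor PV-lock support is omitted in this sketch. */
    __pv_init_lock_hash();
    pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
    pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
    pv_ops.sched.wait = kvm_wait;
    pv_ops.sched.kick = kvm_kick_cpu;
}
```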
@@ -2,12 +2,19 @@
 #ifndef _ASM_ARM64_QSPINLOCK_H
 #define _ASM_ARM64_QSPINLOCK_H
 
-#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
 #include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS    (1 << 9)
 
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
 extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void (*cna_queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 
 #define queued_spin_unlock queued_spin_unlock
 
 /**
@@ -23,12 +30,12 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 
 static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
-    cna_queued_spin_lock_slowpath(lock, val);
+    pv_queued_spin_lock_slowpath(lock, val);
 }
 
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
-    native_queued_spin_unlock(lock);
+    pv_queued_spin_unlock(lock);
 }
 
 #endif
......
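Note (illustrative): the uncontended fast path in asm-generic/qspinlock.h is unaffected by this change; only the contended path now dispatches through pv_ops.sched. Roughly, the resulting call path looks like the sketch below (example_queued_spin_lock() is a restatement for illustration, not code from this patch).

```c
/* Sketch of the generic fast path, showing where the pv_ops dispatch kicks in. */
static __always_inline void example_queued_spin_lock(struct qspinlock *lock)
{
    u32 val;

    /* Uncontended case: a single cmpxchg takes the lock, no pv_ops involved. */
    val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
    if (likely(val == 0))
        return;

    /*
     * Contended case: with this patch, queued_spin_lock_slowpath() goes
     * through pv_ops.sched.queued_spin_lock_slowpath, which may be the
     * native, the CNA, or the PV slowpath.
     */
    queued_spin_lock_slowpath(lock, val);
}
```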
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright(c) 2019 Huawei Technologies Co., Ltd
* Author: Zengruan Ye <yezengruan@huawei.com>
*/
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H
extern void __pv_queued_spin_unlock(struct qspinlock *lock);
#endif
@@ -20,6 +20,9 @@
 #include <asm/qspinlock.h>
 #include <asm/paravirt.h>
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD    (1 << 15)
+
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()    smp_mb()
......
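Note (illustrative): SPIN_THRESHOLD is consumed by the generic PV qspinlock slowpath (kernel/locking/qspinlock_paravirt.h): a waiter spins a bounded number of times before calling the new pv_wait() hook, and the lock holder uses pv_kick() to wake it. The sketch below only shows that spin-then-block pattern; the state handling is simplified and the function name is hypothetical.

```c
/*
 * Loose sketch of the spin-then-block pattern; the real logic lives in
 * kernel/locking/qspinlock_paravirt.h and tracks proper vCPU node states.
 */
static void example_wait_for_turn(u8 *state)
{
    int loop;

    for (;;) {
        /* Spin for a while first; most waits end quickly. */
        for (loop = SPIN_THRESHOLD; loop; loop--) {
            if (READ_ONCE(*state))
                return;
            cpu_relax();
        }

        /* Still waiting: block this vCPU in the hypervisor. */
        pv_wait(state, 0);
        /* The unlocker calls pv_kick(cpu) to wake us, then we re-check. */
    }
}
```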
@@ -49,6 +49,7 @@ arm64-obj-$(CONFIG_ARM64_ERR_RECOV)    += ras.o
 arm64-obj-$(CONFIG_ACPI_NUMA)          += acpi_numa.o
 arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)    += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)           += paravirt.o paravirt-spinlocks.o
+arm64-obj-$(CONFIG_PARAVIRT_SPINLOCKS) += paravirt.o paravirt-spinlocks.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)     += kaslr.o
 arm64-obj-$(CONFIG_HIBERNATION)        += hibernate.o hibernate-asm.o
 arm64-obj-$(CONFIG_KEXEC)              += machine_kexec.o relocate_kernel.o \
......
@@ -26,6 +26,7 @@
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
+#include <asm/paravirt.h>
 #include <linux/stop_machine.h>
 
 #define __ALT_PTR(a,f)    ((void *)&(a)->f + (a)->f)
@@ -291,9 +292,9 @@ void __init apply_boot_alternatives(void)
      */
     if ((numa_spinlock_flag == 1) ||
         (numa_spinlock_flag == 0 && nr_node_ids > 1 &&
-         cna_queued_spin_lock_slowpath ==
+         pv_ops.sched.queued_spin_lock_slowpath ==
          native_queued_spin_lock_slowpath)) {
-        cna_queued_spin_lock_slowpath =
+        pv_ops.sched.queued_spin_lock_slowpath =
             __cna_queued_spin_lock_slowpath;
     }
 #endif
......
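Note (illustrative): with the dedicated cna_queued_spin_lock_slowpath pointer gone, the boot-time selection above installs __cna_queued_spin_lock_slowpath only if nothing else (such as a PV backend) has already replaced the native slowpath, unless CNA was requested explicitly. The condition is restated as a helper below purely for readability; the function name is hypothetical and the meaning of numa_spinlock_flag is assumed from the earlier CNA patch.

```c
/* Restatement of the selection condition above, for readability only. */
static bool __init example_should_use_cna(void)
{
    /* Has some other backend already replaced the native slowpath? */
    bool native_in_place =
        pv_ops.sched.queued_spin_lock_slowpath ==
            native_queued_spin_lock_slowpath;

    if (numa_spinlock_flag == 1)    /* explicitly requested */
        return true;

    /* Default policy: multi-node system and no other override installed. */
    return numa_spinlock_flag == 0 && nr_node_ids > 1 && native_in_place;
}
```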
@@ -11,3 +11,8 @@ __visible bool __native_vcpu_is_preempted(int cpu)
 {
     return false;
 }
+
+bool pv_is_native_spin_unlock(void)
+{
+    return false;
+}
@@ -30,6 +30,10 @@ struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
 struct paravirt_patch_template pv_ops = {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+    .sched.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+    .sched.queued_spin_unlock        = native_queued_spin_unlock,
+#endif
     .sched.vcpu_is_preempted         = __native_vcpu_is_preempted,
 };
 EXPORT_SYMBOL_GPL(pv_ops);
......
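Note (illustrative): the defaults keep native behaviour, so a bare-metal or non-PV boot pays only an indirect call through pv_ops. The native unlock the table points at is ultimately just a release store of the lock byte; the sketch below mirrors the generic implementation and is not code from this patch.

```c
/* What the default .sched.queued_spin_unlock boils down to (illustrative). */
static inline void example_native_queued_spin_unlock(struct qspinlock *lock)
{
    /* Drop the lock byte with release semantics; a queued waiter takes over. */
    smp_store_release(&lock->locked, 0);
}
```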