From 1ca5d55ddfc753970f965140d6e269808467afa6 Mon Sep 17 00:00:00 2001
From: Hanjun Guo
Date: Sat, 31 Aug 2019 11:34:15 +0800
Subject: [PATCH] numa-aware qspinlock: use boot option to enable it

hulk inclusion
category: feature
bugzilla: 13227
CVE: NA

-------------------------------------------------

Set the NUMA-aware qspinlock to default off, and enable it by passing
using_numa_aware_qspinlock in the boot cmdline.

Signed-off-by: Hanjun Guo
Signed-off-by: Wei Li
Signed-off-by: Yang Yingliang
---
 arch/arm64/kernel/setup.c      |  5 +++++
 arch/arm64/kernel/smp.c        |  5 -----
 kernel/locking/qspinlock.c     | 15 +++++++++++++++
 kernel/locking/qspinlock_cna.h | 11 +++++++++++
 4 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7b1a67582e17..a4f9791e03b2 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -312,6 +312,11 @@ void __init setup_arch(char **cmdline_p)
 	early_ioremap_init();
 
 	setup_machine_fdt(__fdt_pointer);
 
+	/*
+	 * Initialise the static keys early as they may be enabled by the
+	 * early param code.
+	 */
+	jump_label_init();
 	parse_early_param();
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 52787a83de55..6d673db8124e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -433,11 +433,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_prepare_boot_cpu(void)
 {
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	/*
-	 * Initialise the static keys early as they may be enabled by the
-	 * cpufeature code.
-	 */
-	jump_label_init();
 	cpuinfo_store_boot_cpu();
 
 	/*
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 6138ec2b6b2d..fdd3a64ec082 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -584,6 +584,21 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
  * Generate the code for NUMA-aware spin locks
  */
 #if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS)
+#include <linux/jump_label.h>
+
+DEFINE_STATIC_KEY_TRUE(cna_lock_disabled);
+
+static int __init cna_locks_init_jump(char *str)
+{
+	if (!IS_ENABLED(CONFIG_NUMA))
+		return 0;
+
+	static_branch_dec(&cna_lock_disabled);
+	pr_info("NUMA aware qspinlock is enabled\n");
+	return 0;
+}
+early_param("using_numa_aware_qspinlock", cna_locks_init_jump);
+
 #define _GEN_CNA_LOCK_SLOWPATH
 
 #undef pv_init_node
diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index 9e6bd9e6d82b..97f420be0624 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -79,6 +79,9 @@ static void cna_init_node(struct mcs_spinlock *node)
 	struct mcs_spinlock *base_node;
 	int cpuid;
 
+	if (static_branch_likely(&cna_lock_disabled))
+		return;
+
 	BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode));
 	/* we store a pointer in the node's @locked field */
 	BUILD_BUG_ON(sizeof(uintptr_t) > sizeof_field(struct mcs_spinlock, locked));
@@ -147,6 +150,9 @@ static struct cna_node *find_successor(struct mcs_spinlock *me)
 static inline bool cna_set_locked_empty_mcs(struct qspinlock *lock, u32 val,
 					struct mcs_spinlock *node)
 {
+	if (static_branch_likely(&cna_lock_disabled))
+		return __set_locked_empty_mcs(lock, val, node);
+
 	/* Check whether the secondary queue is empty. */
 	if (node->locked <= 1) {
 		if (atomic_try_cmpxchg_relaxed(&lock->val, &val,
@@ -177,6 +183,11 @@ static inline void cna_pass_mcs_lock(struct mcs_spinlock *node,
 	u64 *var = &next->locked;
 	u64 val = 1;
 
+	if (static_branch_likely(&cna_lock_disabled)) {
+		__pass_mcs_lock(node, next);
+		return;
+	}
+
 	/*
 	 * Limit thread shuffling when the secondary queue is empty.
 	 * This copes with the overhead the shuffling creates when the
--
GitLab
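
For readers unfamiliar with the static-key pattern the patch relies on, here is a
minimal kernel-style sketch of the same gating scheme. It is an illustration, not
part of the patch: my_feature_disabled, my_feature_setup and my_hot_path are
made-up names, and only the API calls (DEFINE_STATIC_KEY_TRUE, early_param,
static_branch_dec, static_branch_likely) mirror what the diff above actually uses.

#include <linux/jump_label.h>
#include <linux/init.h>
#include <linux/printk.h>

/*
 * The key defaults to true, i.e. "feature disabled". Until a boot
 * option flips it, static_branch_likely() below takes the early
 * return and the feature code is skipped at near-zero cost.
 */
DEFINE_STATIC_KEY_TRUE(my_feature_disabled);

static int __init my_feature_setup(char *str)
{
	/*
	 * Decrementing the key here is only safe because
	 * jump_label_init() now runs before parse_early_param()
	 * (the setup.c hunk above).
	 */
	static_branch_dec(&my_feature_disabled);
	pr_info("my_feature enabled\n");
	return 0;
}
early_param("my_feature", my_feature_setup);

static void my_hot_path(void)
{
	if (static_branch_likely(&my_feature_disabled))
		return;		/* default: feature off */

	/* feature-specific work would go here */
}

Booting with my_feature on the kernel command line (for this patch,
using_numa_aware_qspinlock) flips the key once at early-param time; after that,
every patched call site falls through to the feature path. That is exactly how
the cna_* helpers above fall back to the plain MCS variants while the key stays
true, and run their CNA bodies once the boot option has been given.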