diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7b1a67582e172f50a7a238cfeb0997dab94f7d28..a4f9791e03b298602e18367b546a1e90b219ad92 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -312,6 +312,11 @@ void __init setup_arch(char **cmdline_p)
 	early_ioremap_init();
 
 	setup_machine_fdt(__fdt_pointer);
 
+	/*
+	 * Initialise the static keys early as they may be enabled by the
+	 * early param code.
+	 */
+	jump_label_init();
 	parse_early_param();
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 52787a83de5588f066eee7c6a5cf9e0218aa209a..6d673db8124e6f1424d22a3b5db435fe716ad668 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -433,11 +433,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_prepare_boot_cpu(void)
 {
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	/*
-	 * Initialise the static keys early as they may be enabled by the
-	 * cpufeature code.
-	 */
-	jump_label_init();
 	cpuinfo_store_boot_cpu();
 
 	/*
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 6138ec2b6b2d0ea5158646244573a3c2fcfb46b3..fdd3a64ec0828587e96109cb5bbd685ee614d29c 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -584,6 +584,21 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
  * Generate the code for NUMA-aware spin locks
  */
 #if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS)
+#include <linux/jump_label.h>
+
+DEFINE_STATIC_KEY_TRUE(cna_lock_disabled);
+
+static int __init cna_locks_init_jump(char *str)
+{
+	if (!IS_ENABLED(CONFIG_NUMA))
+		return 0;
+
+	static_branch_dec(&cna_lock_disabled);
+	pr_info("NUMA aware qspinlock is enabled\n");
+	return 0;
+}
+early_param("using_numa_aware_qspinlock", cna_locks_init_jump);
+
 #define _GEN_CNA_LOCK_SLOWPATH
 
 #undef pv_init_node
diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index 9e6bd9e6d82b1ddf31a8c274cd43d14b4a7f7e8d..97f420be0624aa6b30887a6896dabc2a60cdd6b7 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -79,6 +79,9 @@ static void cna_init_node(struct mcs_spinlock *node)
 	struct mcs_spinlock *base_node;
 	int cpuid;
 
+	if (static_branch_likely(&cna_lock_disabled))
+		return;
+
 	BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode));
 	/* we store a pointer in the node's @locked field */
 	BUILD_BUG_ON(sizeof(uintptr_t) > sizeof_field(struct mcs_spinlock, locked));
@@ -147,6 +150,9 @@ static struct cna_node *find_successor(struct mcs_spinlock *me)
 static inline bool cna_set_locked_empty_mcs(struct qspinlock *lock, u32 val,
 					struct mcs_spinlock *node)
 {
+	if (static_branch_likely(&cna_lock_disabled))
+		return __set_locked_empty_mcs(lock, val, node);
+
 	/* Check whether the secondary queue is empty. */
 	if (node->locked <= 1) {
 		if (atomic_try_cmpxchg_relaxed(&lock->val, &val,
@@ -177,6 +183,11 @@ static inline void cna_pass_mcs_lock(struct mcs_spinlock *node,
 	u64 *var = &next->locked;
 	u64 val = 1;
 
+	if (static_branch_likely(&cna_lock_disabled)) {
+		__pass_mcs_lock(node, next);
+		return;
+	}
+
 	/*
 	 * Limit thread shuffling when the secondary queue is empty.
 	 * This copes with the overhead the shuffling creates when the
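
Note (illustrative, not part of the patch): the gate relies on the jump-label API from <linux/jump_label.h>, which declares DEFINE_STATIC_KEY_TRUE, static_branch_dec() and static_branch_likely(). A key defined with DEFINE_STATIC_KEY_TRUE starts with an enable count of 1, so static_branch_likely() takes the "disabled" branch by default at the cost of a single patched jump; one static_branch_dec() drops the count to 0 and flips the branch. Because that flip happens in an early_param handler, jump_label_init() must run before parse_early_param(), which is what the setup.c/smp.c hunks arrange. The sketch below restates the pattern with hypothetical names (example_feature_disabled, example_feature_setup, "example_feature"); it is a minimal stand-alone illustration, not the patch's code.

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Key starts enabled (count == 1), i.e. the feature defaults to "off". */
DEFINE_STATIC_KEY_TRUE(example_feature_disabled);

static int __init example_feature_setup(char *str)
{
	/* Runs from parse_early_param(); requires jump_label_init() first. */
	static_branch_dec(&example_feature_disabled);
	pr_info("example feature enabled\n");
	return 0;
}
early_param("example_feature", example_feature_setup);

static void example_hot_path(void)
{
	/*
	 * True by default, so the feature code below is skipped; once the
	 * early param decrements the key, this test is patched to fall
	 * through instead.
	 */
	if (static_branch_likely(&example_feature_disabled))
		return;

	/* feature-specific slow path, reached only when enabled */
}

With the same pattern, CNA remains compiled in but costs only one patched branch per hook while disabled; booting a CONFIG_NUMA kernel with using_numa_aware_qspinlock on the command line switches the hooks over to the NUMA-aware paths.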