Commit 1ca5d55d authored by Hanjun Guo, committed by Xie XiuQi

numa-aware qspinlock: using boot option to enable it

hulk inclusion
category: feature
bugzilla: 13227
CVE: NA

-------------------------------------------------

Default the numa-aware qspinlock to off; it can be enabled by passing
using_numa_aware_qspinlock on the kernel boot cmdline.

Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent e2ef2690
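The enable mechanism is the standard default-off static-key pattern: a key defined true (meaning "feature disabled") that an early_param handler decrements exactly once when the option appears on the cmdline. A minimal, self-contained sketch of that pattern, with hypothetical names (demo_feature_disabled, using_demo_feature, demo_hot_path) rather than the patch's own:

/*
 * Sketch of the default-off static-key pattern this patch uses.
 * All names here are hypothetical, not taken from the patch.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Key starts TRUE, i.e. the feature starts out disabled. */
DEFINE_STATIC_KEY_TRUE(demo_feature_disabled);

static int __init demo_feature_setup(char *str)
{
	/* Drop the default-true key to false: feature is now enabled. */
	static_branch_dec(&demo_feature_disabled);
	pr_info("demo feature enabled\n");
	return 0;
}
/* Fires from parse_early_param(), so jump_label_init() must run first. */
early_param("using_demo_feature", demo_feature_setup);

static inline void demo_hot_path(void)
{
	/*
	 * Patched branch: compiles to a no-op jump while the feature is
	 * disabled, so the common path pays almost nothing for the check.
	 */
	if (static_branch_likely(&demo_feature_disabled))
		return;	/* feature off: take the unmodified path */

	/* feature-specific work would go here */
}

Booting with using_numa_aware_qspinlock on the cmdline takes the static_branch_dec() path once, before SMP bring-up; from then on the static_branch_likely(&cna_lock_disabled) guards in the hunks below are skipped and the CNA logic runs.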
......
@@ -312,6 +312,11 @@ void __init setup_arch(char **cmdline_p)
 	early_ioremap_init();
 
 	setup_machine_fdt(__fdt_pointer);
 
+	/*
+	 * Initialise the static keys early as they may be enabled by the
+	 * early param code.
+	 */
+	jump_label_init();
 	parse_early_param();
......
......
@@ -433,11 +433,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void __init smp_prepare_boot_cpu(void)
 {
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	/*
-	 * Initialise the static keys early as they may be enabled by the
-	 * cpufeature code.
-	 */
-	jump_label_init();
 	cpuinfo_store_boot_cpu();
 
 	/*
......
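Note that the two arm64 hunks above move jump_label_init() rather than add a second call: static keys can only be flipped after jump_label_init() has run, and the new early_param handler below fires from parse_early_param(), so the call is pulled forward from smp_prepare_boot_cpu() into setup_arch(), ahead of parse_early_param(), leaving the original call site redundant.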
......
@@ -584,6 +584,21 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
  * Generate the code for NUMA-aware spin locks
  */
 #if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS)
+#include <linux/jump_label.h>
+
+DEFINE_STATIC_KEY_TRUE(cna_lock_disabled);
+
+static int __init cna_locks_init_jump(char *str)
+{
+	if (!IS_ENABLED(CONFIG_NUMA))
+		return 0;
+
+	static_branch_dec(&cna_lock_disabled);
+	pr_info("NUMA aware qspinlock is enabled\n");
+
+	return 0;
+}
+early_param("using_numa_aware_qspinlock", cna_locks_init_jump);
 #define _GEN_CNA_LOCK_SLOWPATH
 
 #undef pv_init_node
......
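Each CNA hook in the hunks below gets the same early-out: while cna_lock_disabled is still in its default (true) state, the code falls straight back to the plain MCS behaviour (__set_locked_empty_mcs(), __pass_mcs_lock(), or simply returning), so a kernel booted without the option runs the stock qspinlock slow path at the cost of one patched no-op branch per hook.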
......
@@ -79,6 +79,9 @@ static void cna_init_node(struct mcs_spinlock *node)
 	struct mcs_spinlock *base_node;
 	int cpuid;
 
+	if (static_branch_likely(&cna_lock_disabled))
+		return;
+
 	BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode));
 	/* we store a pointer in the node's @locked field */
 	BUILD_BUG_ON(sizeof(uintptr_t) > sizeof_field(struct mcs_spinlock, locked));
......
@@ -147,6 +150,9 @@ static struct cna_node *find_successor(struct mcs_spinlock *me)
 static inline bool cna_set_locked_empty_mcs(struct qspinlock *lock, u32 val,
 					    struct mcs_spinlock *node)
 {
+	if (static_branch_likely(&cna_lock_disabled))
+		return __set_locked_empty_mcs(lock, val, node);
+
 	/* Check whether the secondary queue is empty. */
 	if (node->locked <= 1) {
 		if (atomic_try_cmpxchg_relaxed(&lock->val, &val,
......
@@ -177,6 +183,11 @@ static inline void cna_pass_mcs_lock(struct mcs_spinlock *node,
 	u64 *var = &next->locked;
 	u64 val = 1;
 
+	if (static_branch_likely(&cna_lock_disabled)) {
+		__pass_mcs_lock(node, next);
+		return;
+	}
+
 	/*
 	 * Limit thread shuffling when the secondary queue is empty.
 	 * This copes with the overhead the shuffling creates when the
......
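A quick way to confirm the option took effect is the pr_info() in cna_locks_init_jump(): booted with using_numa_aware_qspinlock, the line "NUMA aware qspinlock is enabled" should appear in the boot log (visible via dmesg); without the option, the message is absent and the static key keeps the CNA path disabled.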