Commit 516db1c6 authored by Paul Burton, committed by Ralf Baechle

MIPS: CM: Avoid per-core locking with CM3 & higher

CM3 provides a GCR_CL_OTHER register per VP, rather than only per core.
This means that we don't need to prevent other VPs within a core from
racing with code that makes use of the core-other register region.

Reduce locking overhead by demoting the per-core spinlock providing
protection for CM2.5 & lower to a per-CPU/per-VP spinlock for CM3 &
higher.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16193/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent 9b03d8ab
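For context, a minimal sketch of the lock-granularity change follows (hedged: the DEFINE_PER_CPU_ALIGNED declarations mirror those in arch/mips/kernel/mips-cm.c, but cm_lock_example() is an illustrative helper written for this note, not part of the patch):

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/mips-cm.h>

/* One lock (and one saved-flags word) per CPU, i.e. per VP. */
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);

static void cm_lock_example(unsigned int curr_core)
{
	if (mips_cm_revision() >= CM_REV_CM3) {
		/* CM3+: GCR_CL_OTHER exists per VP, so this CPU's own
		 * lock suffices; VPs within a core no longer serialise
		 * against one another. */
		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
				  *this_cpu_ptr(&cm_core_lock_flags));
	} else {
		/* CM2.5 & lower: index the per-CPU array by core number,
		 * so every VP in the core contends on a single lock. */
		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
				  per_cpu(cm_core_lock_flags, curr_core));
	}
}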
@@ -265,15 +265,34 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 	u32 val;
 
 	preempt_disable();
-	curr_core = current_cpu_data.core;
-	spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
-			  per_cpu(cm_core_lock_flags, curr_core));
 
 	if (mips_cm_revision() >= CM_REV_CM3) {
 		val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
 		val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+
+		/*
+		 * We need to disable interrupts in SMP systems in order to
+		 * ensure that we don't interrupt the caller with code which
+		 * may modify the redirect register. We do so here in a
+		 * slightly obscure way by using a spin lock, since this has
+		 * the neat property of also catching any nested uses of
+		 * mips_cm_lock_other() leading to a deadlock or a nice warning
+		 * with lockdep enabled.
+		 */
+		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+				  *this_cpu_ptr(&cm_core_lock_flags));
 	} else {
 		BUG_ON(vp != 0);
+
+		/*
+		 * We only have a GCR_CL_OTHER per core in systems with
+		 * CM 2.5 & older, so have to ensure other VP(E)s don't
+		 * race with us.
+		 */
+		curr_core = current_cpu_data.core;
+		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+				  per_cpu(cm_core_lock_flags, curr_core));
+
 		val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
 	}
 
@@ -288,10 +307,17 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 
 void mips_cm_unlock_other(void)
 {
-	unsigned curr_core = current_cpu_data.core;
+	unsigned int curr_core;
 
-	spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
-			       per_cpu(cm_core_lock_flags, curr_core));
+	if (mips_cm_revision() < CM_REV_CM3) {
+		curr_core = current_cpu_data.core;
+		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+				       per_cpu(cm_core_lock_flags, curr_core));
+	} else {
+		spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+				       *this_cpu_ptr(&cm_core_lock_flags));
+	}
+
 	preempt_enable();
 }
 
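For reference, a hedged usage sketch of the resulting API: mips_cm_lock_other() pins the core-other (redirect) register region to the target core/VP with interrupts and preemption disabled, and mips_cm_unlock_other() releases it. The read_gcr_co_config() accessor is assumed to be one of the GCR_Cx accessors generated in asm/mips-cm.h; treat the helper below as illustrative, not part of this patch:

#include <asm/mips-cm.h>

/* Illustrative: read another core's GCR_Cx_CONFIG via the redirect region. */
static u32 read_other_core_config(unsigned int core, unsigned int vp)
{
	u32 cfg;

	mips_cm_lock_other(core, vp);	/* takes lock; IRQs + preemption off */
	cfg = read_gcr_co_config();	/* accesses (core, vp)'s GCRs */
	mips_cm_unlock_other();		/* drops lock; IRQs + preemption back on */

	return cfg;
}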