diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 46f5e86ad5696fe21dcac2eedd6246fb3f9062b2..0173c7206544cfdb2847a4607c3c92d2238cade1 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -406,6 +406,21 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
 
+	/*
+	 * 4 nodes are allocated based on the assumption that there will
+	 * not be nested NMIs taking spinlocks. That may not be true in
+	 * some architectures even though the chance of needing more than
+	 * 4 nodes will still be extremely unlikely. When that happens,
+	 * we fall back to spinning on the lock directly without using
+	 * any MCS node. This is not the most elegant solution, but is
+	 * simple enough.
+	 */
+	if (unlikely(idx >= MAX_NODES)) {
+		while (!queued_spin_trylock(lock))
+			cpu_relax();
+		goto release;
+	}
+
 	node = grab_mcs_node(node, idx);
 
 	/*
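
For readers outside the kernel tree, here is a minimal userspace sketch of the same fallback pattern, assuming C11 atomics: a fixed-size set of queue-node slots indexed by nesting level, and a plain trylock spin once that index overflows. The names my_lock, my_trylock, my_lock_slowpath and the simplified node bookkeeping are illustrative stand-ins, not the kernel's queued_spin_trylock()/MCS machinery, and the real queueing logic is elided.

/*
 * Standalone sketch (not kernel code) of the overflow fallback above.
 * my_trylock() is a hypothetical stand-in for queued_spin_trylock():
 * a single atomic attempt to take the lock word.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES	4	/* one slot per expected nesting level */

struct my_lock { atomic_flag locked; };

static bool my_trylock(struct my_lock *lock)
{
	/* test_and_set returns the old value; "was clear" means we got it */
	return !atomic_flag_test_and_set_explicit(&lock->locked,
						  memory_order_acquire);
}

static void my_unlock(struct my_lock *lock)
{
	atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

static int node_count;	/* would be a per-CPU counter in the real code */

static void my_lock_slowpath(struct my_lock *lock)
{
	int idx = node_count++;

	if (idx >= MAX_NODES) {
		/*
		 * Overflow fallback, as in the patch: no queue node is
		 * available, so keep retrying the lock word directly.
		 */
		while (!my_trylock(lock))
			;	/* a cpu_relax() equivalent would go here */
		goto release;
	}

	/* ... normal queueing using node slot 'idx' would go here ... */
	while (!my_trylock(lock))
		;

release:
	node_count--;
}

int main(void)
{
	static struct my_lock lock = { .locked = ATOMIC_FLAG_INIT };

	my_lock_slowpath(&lock);
	printf("lock acquired\n");
	my_unlock(&lock);
	return 0;
}

The design point mirrored here is the one the patch comment makes: correctness only requires winning the lock word itself, so in the rare case where no node slot is free the code can give up queueing entirely and simply retry the lock, trading fairness for simplicity.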