Commit 2aa79af6 authored by Peter Zijlstra (Intel), committed by Ingo Molnar

locking/qspinlock: Revert to test-and-set on hypervisors

When we detect a hypervisor (!paravirt, see qspinlock paravirt support
patches), revert to a simple test-and-set lock to avoid the horrors
of queue preemption.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-8-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 2c83e8e9
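
The fallback this patch introduces is conceptually just a test-and-set spinlock: keep retrying an atomic 0 -> locked transition instead of joining a queue. A rough userspace analogue of that loop, as a sketch using C11 atomics (the demo_* names are made up; this is not the kernel code):

#include <stdatomic.h>

static atomic_int demo_lock_word;		/* 0 = unlocked, 1 = locked */

static void demo_tas_lock(void)
{
	int expected = 0;

	/* Retry the 0 -> 1 transition until it succeeds; there is no
	 * queue and no ordering among waiters. */
	while (!atomic_compare_exchange_weak(&demo_lock_word, &expected, 1))
		expected = 0;
}

static void demo_tas_unlock(void)
{
	atomic_store(&demo_lock_word, 0);
}

int main(void)
{
	demo_tas_lock();
	/* critical section would go here */
	demo_tas_unlock();
	return 0;
}

Such a lock is unfair, but a waiter that the hypervisor preempts holds no queue slot, so it cannot stall the waiters behind it; that is the preemption problem the commit message refers to.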
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
	smp_store_release((u8 *)lock, 0);
}
#define virt_queued_spin_lock virt_queued_spin_lock

static inline bool virt_queued_spin_lock(struct qspinlock *lock)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	/* Fall back to a simple test-and-set lock; see the commit message above. */
	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
		cpu_relax();

	return true;
}
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
		cpu_relax();
}
#ifndef virt_queued_spin_lock
static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
/*
 * Initializier
 */
......
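
The two headers above cooperate through a standard kernel override idiom: the generic header supplies the always-false stub only when the architecture has not already defined virt_queued_spin_lock together with a same-named macro, which is exactly what the x86 header does. A minimal standalone illustration of that #ifndef pattern (virt_lock_fast_path is a made-up name used here in place of the kernel symbol):

#include <stdio.h>

/* "Architecture" side: define the override plus a same-named macro so the
 * generic side below can detect it with #ifndef. */
#define virt_lock_fast_path virt_lock_fast_path
static inline int virt_lock_fast_path(void)
{
	return 1;	/* pretend the test-and-set path took the lock */
}

/* "Generic" side: only provide the stub when no override exists. */
#ifndef virt_lock_fast_path
static inline int virt_lock_fast_path(void)
{
	return 0;	/* always fall through to the queued slowpath */
}
#endif

int main(void)
{
	printf("fast path taken: %d\n", virt_lock_fast_path());
	return 0;
}

Because the check is done by the preprocessor, the stub costs nothing on architectures that override it and leaves every other architecture building unchanged.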
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
	if (virt_queued_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
......
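
Taken together, the three hunks give the following control flow in a guest: try the uncontended fast path, and if it fails enter the slowpath, which now bails out through the virt hook before doing any MCS queueing. A self-contained mock of that flow (every demo_* identifier is an illustrative stand-in, not a kernel symbol):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_LOCKED	1

struct demo_qspinlock { atomic_int val; };

static bool demo_on_hypervisor = true;	/* stand-in for static_cpu_has(X86_FEATURE_HYPERVISOR) */

/* Stand-in for virt_queued_spin_lock(): test-and-set spin on hypervisors. */
static bool demo_virt_lock(struct demo_qspinlock *lock)
{
	int expected = 0;

	if (!demo_on_hypervisor)
		return false;
	while (!atomic_compare_exchange_weak(&lock->val, &expected, DEMO_LOCKED))
		expected = 0;
	return true;
}

/* Stand-in for queued_spin_lock_slowpath(): the virt hook runs first. */
static void demo_slowpath(struct demo_qspinlock *lock)
{
	if (demo_virt_lock(lock))
		return;
	/* ... the MCS queueing code would follow here ... */
}

/* Stand-in for the generic queued_spin_lock() fast path. */
static void demo_lock(struct demo_qspinlock *lock)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&lock->val, &expected, DEMO_LOCKED))
		return;			/* uncontended fast path */
	demo_slowpath(lock);
}

int main(void)
{
	struct demo_qspinlock lock = { 0 };

	demo_lock(&lock);
	printf("locked, val = %d\n", atomic_load(&lock.val));
	atomic_store(&lock.val, 0);	/* unlock */
	return 0;
}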