Commit 314cdbef, authored by Nick Piggin, committed by Ingo Molnar

x86: FIFO ticket spinlocks

Introduce ticket lock spinlocks for x86, which are FIFO. The implementation
is described in the comments. The straight-line lock/unlock instruction
sequence is slightly slower than the dec-based locks on modern x86 CPUs,
however the difference is quite small on Core2 and Opteron when working out of
cache, and becomes almost insignificant even on P4 when the lock misses cache.
trylock is slowed down more significantly, but trylocks are relatively rare.

On an 8 core (2 socket) Opteron, spinlock unfairness is extremely noticeable,
with a userspace test showing a difference of up to 2x in per-thread runtime,
and some threads being starved or "unfairly" granted the lock up to 1,000,000
(!) times. After this patch, all threads appear to finish at exactly the same
time.

The memory ordering of the lock does conform to x86 standards, and the
implementation has been reviewed by Intel and AMD engineers.

The algorithm also tells us how many CPUs are contending the lock, so
lockbreak becomes trivial and we no longer have to waste 4 bytes per
spinlock for it.
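
To illustrate that count (this helper is not part of the patch; it just
mirrors the __raw_spin_is_contended() added below): the ticket word keeps the
head in its low byte and the tail in its high byte, so their difference is the
number of CPUs currently holding or queued for the lock.

static inline unsigned int ticket_queue_depth(unsigned int slock)
{
	unsigned int head = slock & 0xff;		/* ticket now being served */
	unsigned int tail = (slock >> 8) & 0xff;	/* next ticket to hand out */

	/* mask again so the count survives byte wraparound */
	return (tail - head) & 0xff;	/* 0: free, 1: held, >1: contended */
}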

After this, we can no longer spin on any locks with preempt enabled and
cannot re-enable interrupts when spinning on an irq-safe lock, because at
that point we have already taken a ticket and we would deadlock if the same
CPU tried to take the lock again (the later ticket could never be served
while the earlier one is still waiting). These tricks were questionable
anyway: if the lock happens to be taken under a preempt- or
interrupt-disabled section, it will just have the same latency problems.
The real fix is to keep critical sections short, and to ensure locks are
reasonably fair (which this patch does).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 95c354fe
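
Before the diff itself, here is a minimal userspace sketch of the ticket-lock
scheme described above, using C11 atomics instead of the kernel's 16-bit
xadd/incb assembly; the type and function names are illustrative only, not
part of the patch. Note that acquire is deliberately not reentrant: if the
same CPU takes a second ticket before its first one is served (say, from an
interrupt handler), it spins forever, which is exactly the deadlock mentioned
above.

#include <stdatomic.h>
#include <stdio.h>

struct ticket_lock {
	atomic_uchar head;	/* ticket currently being served */
	atomic_uchar tail;	/* next ticket to hand out */
};

static void ticket_lock_acquire(struct ticket_lock *lk)
{
	/* Atomically note the tail and increment it: the xadd step. */
	unsigned char me = atomic_fetch_add(&lk->tail, 1);

	/* Spin until the head reaches our ticket; waiters are served FIFO. */
	while (atomic_load(&lk->head) != me)
		;	/* the kernel uses "rep ; nop" (pause) in this loop */
}

static void ticket_lock_release(struct ticket_lock *lk)
{
	/* Serve the next ticket; this is what the "incb" unlock does. */
	atomic_fetch_add(&lk->head, 1);
}

int main(void)
{
	struct ticket_lock lk = { 0, 0 };	/* unlocked: head == tail */

	ticket_lock_acquire(&lk);
	printf("waiters behind the holder: %d\n",
	       atomic_load(&lk.tail) - atomic_load(&lk.head) - 1);
	ticket_lock_release(&lk);
	printf("unlocked again: %s\n",
	       atomic_load(&lk.head) == atomic_load(&lk.tail) ? "yes" : "no");
	return 0;
}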
@@ -20,8 +20,7 @@ config X86
 	def_bool y
 
 config GENERIC_LOCKBREAK
-	def_bool y
-	depends on SMP && PREEMPT
+	def_bool n
 
 config GENERIC_TIME
 	def_bool y
...
@@ -1077,27 +1077,6 @@ static inline unsigned long __raw_local_irq_save(void)
 	return f;
 }
 
-#define CLI_STRING						\
-	_paravirt_alt("pushl %%ecx; pushl %%edx;"		\
-		      "call *%[paravirt_cli_opptr];"		\
-		      "popl %%edx; popl %%ecx",			\
-		      "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
-
-#define STI_STRING						\
-	_paravirt_alt("pushl %%ecx; pushl %%edx;"		\
-		      "call *%[paravirt_sti_opptr];"		\
-		      "popl %%edx; popl %%ecx",			\
-		      "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
-
-#define CLI_STI_CLOBBERS , "%eax"
-#define CLI_STI_INPUT_ARGS					\
-	,							\
-	[paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)),	\
-	[paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable),	\
-	[paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)),	\
-	[paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable),	\
-	paravirt_clobber(CLBR_EAX)
-
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
 #undef __PVOP_CALL
...
@@ -5,6 +5,7 @@
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <linux/compiler.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -12,7 +13,8 @@
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
- * We make no fairness assumptions. They have a cost.
+ * These are fair FIFO ticket locks, which are currently limited to 256
+ * CPUs.
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
@@ -42,103 +44,102 @@ typedef int _slock_t;
 # define LOCK_PTR_REG "D"
 #endif
 
+#if (NR_CPUS > 256)
+#error spinlock supports a maximum of 256 CPUs
+#endif
+
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
+	int tmp = *(volatile signed int *)(&(lock)->slock);
+
+	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
-		"jns 3f\n"
-		"2:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0,%0\n\t"
-		"jle 2b\n\t"
-		"jmp 1b\n"
-		"3:\n\t"
-		: "+m" (lock->slock) : : "memory");
+	int tmp = *(volatile signed int *)(&(lock)->slock);
+
+	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
 
-/*
- * It is easier for the lock validator if interrupts are not re-enabled
- * in the middle of a lock-acquire. This is a performance feature anyway
- * so we turn it off:
- *
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
- */
-#ifndef CONFIG_PROVE_LOCKING
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
-					 unsigned long flags)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm volatile(
-		"\n1:\t"
-		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
-		"jns 5f\n"
-		"testl $0x200, %[flags]\n\t"
-		"jz 4f\n\t"
-		STI_STRING "\n"
-		"3:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jle 3b\n\t"
-		CLI_STRING "\n\t"
+	short inc = 0x0100;
+
+	/*
+	 * Ticket locks are conceptually two bytes, one indicating the current
+	 * head of the queue, and the other indicating the current tail. The
	 * lock is acquired by atomically noting the tail and incrementing it
+	 * by one (thus adding ourself to the queue and noting our position),
+	 * then waiting until the head becomes equal to the the initial value
+	 * of the tail.
+	 *
+	 * This uses a 16-bit xadd to increment the tail and also load the
+	 * position of the head, which takes care of memory ordering issues
+	 * and should be optimal for the uncontended case. Note the tail must
+	 * be in the high byte, otherwise the 16-bit wide increment of the low
+	 * byte would carry up and contaminate the high byte.
+	 */
+	__asm__ __volatile__ (
+		LOCK_PREFIX "xaddw %w0, %1\n"
+		"1:\t"
+		"cmpb %h0, %b0\n\t"
+		"je 2f\n\t"
+		"rep ; nop\n\t"
+		"movb %1, %b0\n\t"
+		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
-		"4:\t"
-		"rep;nop\n\t"
-		LOCK_INS_CMP " $0, %[slock]\n\t"
-		"jg 1b\n\t"
-		"jmp 4b\n"
-		"5:\n\t"
-		: [slock] "+m" (lock->slock)
-		: [flags] "r" ((u32)flags)
-		  CLI_STI_INPUT_ARGS
-		: "memory" CLI_STI_CLOBBERS);
+		"2:"
+		:"+Q" (inc), "+m" (lock->slock)
+		:
+		:"memory", "cc");
 }
-#endif
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	_slock_t oldval;
+	int tmp;
+	short new;
 
 	asm volatile(
-		LOCK_INS_XCH " %0,%1"
-		:"=q" (oldval), "+m" (lock->slock)
-		:"0" (0) : "memory");
-
-	return oldval > 0;
+		"movw %2,%w0\n\t"
+		"cmpb %h0,%b0\n\t"
+		"jne 1f\n\t"
+		"movw %w0,%w1\n\t"
+		"incb %h1\n\t"
+		"lock ; cmpxchgw %w1,%2\n\t"
+		"1:"
+		"sete %b1\n\t"
+		"movzbl %b1,%0\n\t"
+		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		:
+		: "memory", "cc");
+
+	return tmp;
 }
 
+#if defined(CONFIG_X86_32) && \
+	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
 /*
- * __raw_spin_unlock based on writing $1 to the low byte.
- * This method works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
+ * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
  * (PPro errata 66, 92)
  */
-#if defined(X86_64) || \
-	(!defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE))
-
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
-{
-	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
-}
-
+# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
 #else
+# define UNLOCK_LOCK_PREFIX
+#endif
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	unsigned char oldval = 1;
-
-	asm volatile("xchgb %b0, %1"
-		     : "=q" (oldval), "+m" (lock->slock)
-		     : "0" (oldval) : "memory");
+	__asm__ __volatile__(
+		UNLOCK_LOCK_PREFIX "incb %0"
+		:"+m" (lock->slock)
+		:
+		:"memory", "cc");
 }
 
-#endif
-
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
@@ -159,11 +160,19 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * with the high bit (sign) being the "contended" bit.
  */
 
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
 static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
 
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
 static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
...
@@ -9,7 +9,7 @@ typedef struct {
 	unsigned int slock;
 } raw_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
...
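
One way to read the final hunk: the old decrement-based lock used slock == 1
to mean unlocked, while a ticket lock is unlocked exactly when head == tail,
so the natural initializer becomes all zeroes. A tiny self-contained check
(is_locked() here is written to match the new __raw_spin_is_locked(); it is
not kernel code):

#include <assert.h>

/* head is the low byte, tail the high byte of the ticket word */
static int is_locked(unsigned int slock)
{
	return ((slock >> 8) & 0xff) != (slock & 0xff);
}

int main(void)
{
	assert(!is_locked(0x0000));	/* __RAW_SPIN_LOCK_UNLOCKED { 0 } */
	assert(is_locked(0x0100));	/* one ticket taken, not yet served */
	return 0;
}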