Commit c576a3ea authored by Jeremy Fitzhardinge, committed by H. Peter Anvin

x86, ticketlock: Convert spin loop to C

The inner loop of __ticket_spin_lock isn't doing anything very special,
so reimplement it in C.

For the 8 bit ticket lock variant, we use a register union to get direct
access to the lower and upper bytes in the tickets, but unfortunately gcc
won't generate a direct comparison between the two halves of the register,
so the generated asm isn't quite as pretty as the hand-coded version.
However benchmarking shows that this is actually a small improvement in
runtime performance on some benchmarks, and never a slowdown.
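
For reference, the layout the union relies on is roughly the following (a sketch, not the exact kernel definitions; on little-endian x86 the head sits in the low byte of slock and the tail in the high byte, so the single locked xaddw both takes a ticket and returns the current head):

        typedef unsigned char __ticket_t;        /* 8-bit tickets when NR_CPUS < 256 */

        struct __raw_tickets {
                __ticket_t head;                 /* ticket currently being served (low byte) */
                __ticket_t tail;                 /* next ticket to hand out (high byte) */
        };

        register union {
                struct __raw_tickets tickets;
                unsigned short slock;            /* the same two bytes viewed as one 16-bit word */
        } inc = { .slock = 1 << TICKET_SHIFT };  /* TICKET_SHIFT is 8 here: bump the tail */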

We also need to make sure there's a barrier at the end of the lock loop
to make sure that the compiler doesn't move any instructions from within
the locked region into the region where we don't yet own the lock.
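
(barrier() itself is a pure compiler barrier; it expands to roughly the following empty asm with a "memory" clobber, which emits no instruction but keeps gcc from moving loads and stores across it:)

        #define barrier()       asm volatile("" : : : "memory")
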
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Parent 84eb950d
@@ -57,21 +57,21 @@
 #if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-        unsigned short inc = 1 << TICKET_SHIFT;
+        register union {
+                struct __raw_tickets tickets;
+                unsigned short slock;
+        } inc = { .slock = 1 << TICKET_SHIFT };
 
-        asm volatile (
-                LOCK_PREFIX "xaddw %w0, %1\n"
-                "1:\t"
-                "cmpb %h0, %b0\n\t"
-                "je 2f\n\t"
-                "rep ; nop\n\t"
-                "movb %1, %b0\n\t"
-                /* don't need lfence here, because loads are in-order */
-                "jmp 1b\n"
-                "2:"
-                : "+Q" (inc), "+m" (lock->slock)
-                :
-                : "memory", "cc");
+        asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
+                      : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
+
+        for (;;) {
+                if (inc.tickets.head == inc.tickets.tail)
+                        break;
+                cpu_relax();
+                inc.tickets.head = ACCESS_ONCE(lock->tickets.head);
+        }
+        barrier();              /* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
@@ -104,22 +104,22 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
         unsigned inc = 1 << TICKET_SHIFT;
-        unsigned tmp;
+        __ticket_t tmp;
 
-        asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
-                     "movzwl %w0, %2\n\t"
-                     "shrl $16, %0\n\t"
-                     "1:\t"
-                     "cmpl %0, %2\n\t"
-                     "je 2f\n\t"
-                     "rep ; nop\n\t"
-                     "movzwl %1, %2\n\t"
-                     /* don't need lfence here, because loads are in-order */
-                     "jmp 1b\n"
-                     "2:"
-                     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
-                     :
-                     : "memory", "cc");
+        asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
+                     : "+r" (inc), "+m" (lock->slock)
+                     : : "memory", "cc");
+
+        tmp = inc;
+        inc >>= TICKET_SHIFT;
+
+        for (;;) {
+                if ((__ticket_t)inc == tmp)
+                        break;
+                cpu_relax();
+                tmp = ACCESS_ONCE(lock->tickets.head);
+        }
+        barrier();              /* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
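
For anyone who wants to experiment with the converted spin loop outside the kernel, here is a minimal userspace model of the same ticket-lock idea. It is only an illustrative sketch, not the kernel code: it uses C11 atomics instead of the LOCK-prefixed xadd, keeps head and tail as two separate atomic bytes rather than the fused slock word, and (like the 8-bit kernel variant) assumes fewer than 256 contending threads.

        #include <stdatomic.h>
        #include <stdint.h>

        struct ticket_lock {
                _Atomic uint8_t head;   /* ticket currently being served */
                _Atomic uint8_t tail;   /* next ticket to hand out */
        };

        static void ticket_lock_acquire(struct ticket_lock *lock)
        {
                /* take a ticket: the old tail value is our place in the queue */
                uint8_t me = atomic_fetch_add_explicit(&lock->tail, 1,
                                                       memory_order_relaxed);

                /* spin until the head catches up with our ticket */
                while (atomic_load_explicit(&lock->head, memory_order_acquire) != me)
                        ;       /* a real lock would add a pause/yield hint here, like cpu_relax() */
        }

        static void ticket_lock_release(struct ticket_lock *lock)
        {
                /* pass the lock on by advancing the head; only the holder writes it */
                atomic_fetch_add_explicit(&lock->head, 1, memory_order_release);
        }

The acquire load in the spin loop plays the role the barrier() call plays in the kernel version: nothing from the critical section can be moved above the point where the lock is actually taken.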