Commit 8efb90cf authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - big rtmutex and futex cleanup and robustification from Thomas
     Gleixner
   - mutex optimizations and refinements from Jason Low
   - arch_mutex_cpu_relax() removal and related cleanups
   - smaller lockdep tweaks"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  arch, locking: Ciao arch_mutex_cpu_relax()
  locking/lockdep: Only ask for /proc/lock_stat output when available
  locking/mutexes: Optimize mutex trylock slowpath
  locking/mutexes: Try to acquire mutex only if it is unlocked
  locking/mutexes: Delete the MUTEX_SHOW_NO_WAITER macro
  locking/mutexes: Correct documentation on mutex optimistic spinning
  rtmutex: Make the rtmutex tester depend on BROKEN
  futex: Simplify futex_lock_pi_atomic() and make it more robust
  futex: Split out the first waiter attachment from lookup_pi_state()
  futex: Split out the waiter check from lookup_pi_state()
  futex: Use futex_top_waiter() in lookup_pi_state()
  futex: Make unlock_pi more robust
  rtmutex: Avoid pointless requeueing in the deadlock detection chain walk
  rtmutex: Cleanup deadlock detector debug logic
  rtmutex: Confine deadlock logic to futex
  rtmutex: Simplify remove_waiter()
  rtmutex: Document pi chain walk
  rtmutex: Clarify the boost/deboost part
  rtmutex: No need to keep task ref for lock owner check
  rtmutex: Simplify and document try_to_take_rtmutex()
  ...
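The bulk of the per-architecture churn below comes from the arch_mutex_cpu_relax() removal: each arch header now defines cpu_relax_lowlatency() next to cpu_relax(), and the locking slowpaths spin on the new helper instead. A minimal userspace sketch of that pattern, assuming a plain compiler barrier; the spin_wait() helper is illustrative and not part of the diff:

#define barrier()               __asm__ __volatile__("" ::: "memory")
#define cpu_relax()             barrier()
/* In this series almost every arch makes the low-latency variant a plain
 * alias; s390 is the exception and defines it as a bare barrier(). */
#define cpu_relax_lowlatency()  cpu_relax()

/* Hypothetical spin loop: this is the call the MCS, mutex, rwsem and
 * qrwlock slowpaths further down now make instead of arch_mutex_cpu_relax(). */
static inline void spin_wait(volatile int *flag)
{
	while (!*flag)
		cpu_relax_lowlatency();
}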
......@@ -57,6 +57,7 @@ unsigned long get_wchan(struct task_struct *p);
((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
......
......@@ -62,6 +62,8 @@ unsigned long thread_saved_pc(struct task_struct *t);
#define cpu_relax() do { } while (0)
#endif
#define cpu_relax_lowlatency() cpu_relax()
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
......
......@@ -82,6 +82,8 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
......
......@@ -129,6 +129,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
......
......@@ -92,6 +92,7 @@ extern struct avr32_cpuinfo boot_cpu_data;
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
struct cpu_context {
......
......@@ -99,7 +99,7 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() smp_mb()
#define cpu_relax_lowlatency() cpu_relax()
/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
......
......@@ -121,6 +121,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
#define cpu_relax_lowlatency() cpu_relax()
extern const struct seq_operations cpuinfo_op;
......
......@@ -63,6 +63,7 @@ static inline void release_thread(struct task_struct *dead_task)
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
......
......@@ -56,6 +56,7 @@ struct thread_struct {
}
#define cpu_relax() __vmyield()
#define cpu_relax_lowlatency() cpu_relax()
/*
* Decides where the kernel will search for a free chunk of vm space during
......
......@@ -548,6 +548,7 @@ ia64_eoi (void)
}
#define cpu_relax() ia64_hint(ia64_hint_pause)
#define cpu_relax_lowlatency() cpu_relax()
static inline int
ia64_get_irr(unsigned int vector)
......
......@@ -133,5 +133,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#endif /* _ASM_M32R_PROCESSOR_H */
......@@ -176,5 +176,6 @@ unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#endif
......@@ -155,6 +155,7 @@ unsigned long get_wchan(struct task_struct *p);
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
extern void setup_priv(void);
......
......@@ -22,6 +22,7 @@
extern const struct seq_operations cpuinfo_op;
# define cpu_relax() barrier()
# define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
......
......@@ -367,6 +367,7 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/*
* Return_address is a replacement for __builtin_return_address(count)
......
......@@ -68,7 +68,9 @@ extern struct mn10300_cpuinfo cpu_data[];
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
extern void dodgy_tsc(void);
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/*
* User space process size: 1.75GB (default).
......
......@@ -101,6 +101,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PROCESSOR_H */
......@@ -338,6 +338,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/* Used as a macro to identify the combined VIPT/PIPT cached
* CPUs which require a guarantee of coherency (no inequivalent
......
......@@ -400,6 +400,8 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define cpu_relax() barrier()
#endif
#define cpu_relax_lowlatency() cpu_relax()
/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes);
......
......@@ -217,7 +217,7 @@ static inline void cpu_relax(void)
barrier();
}
#define arch_mutex_cpu_relax() barrier()
#define cpu_relax_lowlatency() barrier()
static inline void psw_set_key(unsigned int key)
{
......
......@@ -24,6 +24,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#define release_thread(thread) do {} while (0)
/*
......
......@@ -97,6 +97,7 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
void stop_this_cpu(void *);
......
......@@ -119,6 +119,8 @@ extern struct task_struct *last_task_used_math;
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
extern void (*sparc_idle)(void);
#endif
......
......@@ -216,6 +216,7 @@ unsigned long get_wchan(struct task_struct *task);
"nop\n\t" \
".previous" \
::: "memory")
#define cpu_relax_lowlatency() cpu_relax()
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
......
......@@ -266,6 +266,8 @@ static inline void cpu_relax(void)
barrier();
}
#define cpu_relax_lowlatency() cpu_relax()
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;
......
......@@ -71,6 +71,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
......
......@@ -99,7 +99,7 @@
#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For either of these options x86 doesn't have a strong TSO memory
* For this option x86 doesn't have a strong TSO memory
* model and we should fall back to full barriers.
*/
......
......@@ -696,6 +696,8 @@ static inline void cpu_relax(void)
rep_nop();
}
#define cpu_relax_lowlatency() cpu_relax()
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
......
......@@ -3,7 +3,7 @@
#include <asm-generic/qrwlock_types.h>
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
#ifndef CONFIG_X86_PPRO_FENCE
#define queue_write_unlock queue_write_unlock
static inline void queue_write_unlock(struct qrwlock *lock)
{
......
......@@ -25,7 +25,8 @@ static inline void rep_nop(void)
__asm__ __volatile__("rep;nop": : :"memory");
}
#define cpu_relax() rep_nop()
#define cpu_relax() rep_nop()
#define cpu_relax_lowlatency() cpu_relax()
#include <asm/processor-generic.h>
......
......@@ -182,6 +182,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/* Special register access. */
......
......@@ -176,8 +176,4 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif
#endif /* __LINUX_MUTEX_H */
......@@ -90,11 +90,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
struct hrtimer_sleeper *timeout);
extern int rt_mutex_trylock(struct rt_mutex *lock);
......
......@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
seqcount_lockdep_reader_access(s);
smp_rmb();
return ret & ~1;
}
......
......@@ -792,93 +792,90 @@ void exit_pi_state_list(struct task_struct *curr)
* [10] There is no transient state which leaves owner and user space
* TID out of sync.
*/
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
/*
* Validate that the existing waiter has a pi_state and sanity check
* the pi_state against the user space value. If correct, attach to
* it.
*/
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
struct futex_pi_state **ps)
{
struct futex_pi_state *pi_state = NULL;
struct futex_q *this, *next;
struct task_struct *p;
pid_t pid = uval & FUTEX_TID_MASK;
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (match_futex(&this->key, key)) {
/*
* Sanity check the waiter before increasing
* the refcount and attaching to it.
*/
pi_state = this->pi_state;
/*
* Userspace might have messed up non-PI and
* PI futexes [3]
*/
if (unlikely(!pi_state))
return -EINVAL;
/*
* Userspace might have messed up non-PI and PI futexes [3]
*/
if (unlikely(!pi_state))
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
WARN_ON(!atomic_read(&pi_state->refcount));
/*
* Handle the owner died case:
*/
if (uval & FUTEX_OWNER_DIED) {
/*
* exit_pi_state_list sets owner to NULL and wakes the
* topmost waiter. The task which acquires the
* pi_state->rt_mutex will fixup owner.
*/
if (!pi_state->owner) {
/*
* Handle the owner died case:
* No pi state owner, but the user space TID
* is not 0. Inconsistent state. [5]
*/
if (uval & FUTEX_OWNER_DIED) {
/*
* exit_pi_state_list sets owner to NULL and
* wakes the topmost waiter. The task which
* acquires the pi_state->rt_mutex will fixup
* owner.
*/
if (!pi_state->owner) {
/*
* No pi state owner, but the user
* space TID is not 0. Inconsistent
* state. [5]
*/
if (pid)
return -EINVAL;
/*
* Take a ref on the state and
* return. [4]
*/
goto out_state;
}
/*
* If TID is 0, then either the dying owner
* has not yet executed exit_pi_state_list()
* or some waiter acquired the rtmutex in the
* pi state, but did not yet fixup the TID in
* user space.
*
* Take a ref on the state and return. [6]
*/
if (!pid)
goto out_state;
} else {
/*
* If the owner died bit is not set,
* then the pi_state must have an
* owner. [7]
*/
if (!pi_state->owner)
return -EINVAL;
}
if (pid)
return -EINVAL;
/*
* Bail out if user space manipulated the
* futex value. If pi state exists then the
* owner TID must be the same as the user
* space TID. [9/10]
* Take a ref on the state and return success. [4]
*/
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
out_state:
atomic_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
goto out_state;
}
/*
* If TID is 0, then either the dying owner has not
* yet executed exit_pi_state_list() or some waiter
* acquired the rtmutex in the pi state, but did not
* yet fixup the TID in user space.
*
* Take a ref on the state and return success. [6]
*/
if (!pid)
goto out_state;
} else {
/*
* If the owner died bit is not set, then the pi_state
* must have an owner. [7]
*/
if (!pi_state->owner)
return -EINVAL;
}
/*
* Bail out if user space manipulated the futex value. If pi
* state exists then the owner TID must be the same as the
* user space TID. [9/10]
*/
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
out_state:
atomic_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
}
/*
* Lookup the task for the TID provided from user space and attach to
* it after doing proper sanity checks.
*/
static int attach_to_pi_owner(u32 uval, union futex_key *key,
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
struct futex_pi_state *pi_state;
struct task_struct *p;
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0 [1]
......@@ -920,7 +917,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
pi_state = alloc_pi_state();
/*
* Initialize the pi_mutex in locked state and make 'p'
* Initialize the pi_mutex in locked state and make @p
* the owner of it:
*/
rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
......@@ -940,6 +937,36 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
return 0;
}
static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_q *match = futex_top_waiter(hb, key);
/*
* If there is a waiter on that futex, validate it and
* attach to the pi_state when the validation succeeds.
*/
if (match)
return attach_to_pi_state(uval, match->pi_state, ps);
/*
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
return attach_to_pi_owner(uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
u32 uninitialized_var(curval);
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
/* If user space value changed, let the caller retry */
return curval != uval ? -EAGAIN : 0;
}
/**
* futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
* @uaddr: the pi futex user address
......@@ -963,113 +990,69 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
ret = lock_taken = 0;
u32 uval, newval, vpid = task_pid_vnr(task);
struct futex_q *match;
int ret;
/*
* To avoid races, we attempt to take the lock here again
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
* Read the user space value first so we can validate a few
* things before proceeding further.
*/
newval = vpid;
if (set_waiters)
newval |= FUTEX_WAITERS;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
if (get_futex_value_locked(&uval, uaddr))
return -EFAULT;
/*
* Detect deadlocks.
*/
if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK;
/*
* Surprise - we got the lock, but we do not trust user space at all.
*/
if (unlikely(!curval)) {
/*
* We verify whether there is kernel state for this
* futex. If not, we can safely assume, that the 0 ->
* TID transition is correct. If state exists, we do
* not bother to fixup the user space state as it was
* corrupted already.
*/
return futex_top_waiter(hb, key) ? -EINVAL : 1;
}
uval = curval;
/*
* Set the FUTEX_WAITERS flag, so the owner will know it has someone
* to wake at the next unlock.
* Lookup existing state first. If it exists, try to attach to
* its pi_state.
*/
newval = curval | FUTEX_WAITERS;
match = futex_top_waiter(hb, key);
if (match)
return attach_to_pi_state(uval, match->pi_state, ps);
/*
* Should we force take the futex? See below.
* No waiter and user TID is 0. We are here because the
* waiters or the owner died bit is set or called from
* requeue_cmp_pi or for whatever reason something took the
* syscall.
*/
if (unlikely(force_take)) {
if (!(uval & FUTEX_TID_MASK)) {
/*
* Keep the OWNER_DIED and the WAITERS bit and set the
* new TID value.
* We take over the futex. No other waiters and the user space
* TID is 0. We preserve the owner died bit.
*/
newval = (curval & ~FUTEX_TID_MASK) | vpid;
force_take = 0;
lock_taken = 1;
}
newval = uval & FUTEX_OWNER_DIED;
newval |= vpid;
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
if (unlikely(curval != uval))
goto retry;
/* The futex requeue_pi code can enforce the waiters bit */
if (set_waiters)
newval |= FUTEX_WAITERS;
ret = lock_pi_update_atomic(uaddr, uval, newval);
/* If the take over worked, return 1 */
return ret < 0 ? ret : 1;
}
/*
* We took the lock due to forced take over.
* First waiter. Set the waiters bit before attaching ourself to
* the owner. If owner tries to unlock, it will be forced into
* the kernel and blocked on hb->lock.
*/
if (unlikely(lock_taken))
return 1;
newval = uval | FUTEX_WAITERS;
ret = lock_pi_update_atomic(uaddr, uval, newval);
if (ret)
return ret;
/*
* We dont have the lock. Look up the PI state (or create it if
* we are the first waiter):
* If the update of the user space value succeeded, we try to
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
ret = lookup_pi_state(uval, hb, key, ps);
if (unlikely(ret)) {
switch (ret) {
case -ESRCH:
/*
* We failed to find an owner for this
* futex. So we have no pi_state to block
* on. This can happen in two cases:
*
* 1) The owner died
* 2) A stale FUTEX_WAITERS bit
*
* Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
* If the owner died or we have a stale
* WAITERS bit the owner TID in the user space
* futex is 0.
*/
if (!(curval & FUTEX_TID_MASK)) {
force_take = 1;
goto retry;
}
default:
break;
}
}
return ret;
return attach_to_pi_owner(uval, key, ps);
}
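Pieced together from the fragments above, the rewritten futex_lock_pi_atomic() boils down to one linear decision sequence: read the user space value, check for self-deadlock, attach to an existing top waiter if there is one, take the futex over when the owner TID is 0, and otherwise set FUTEX_WAITERS and attach to the owner. A standalone sketch of that flow; the constant value and the have_top_waiter parameter are illustrative stand-ins for FUTEX_TID_MASK and the futex_top_waiter() lookup:

#include <stdint.h>
#include <stdbool.h>

#define TID_MASK 0x3fffffffu	/* stand-in for FUTEX_TID_MASK */

/* Returns 1 if we took the futex over, 0 if the caller attaches to a
 * pi_state and blocks, negative for the deadlock case. The cmpxchg retry
 * handling lives in lock_pi_update_atomic() and is omitted here. */
static int lock_pi_atomic_sketch(uint32_t uval, uint32_t my_tid,
				 bool have_top_waiter)
{
	if ((uval & TID_MASK) == my_tid)
		return -1;		/* -EDEADLK: we already own it */
	if (have_top_waiter)
		return 0;		/* attach_to_pi_state() path */
	if (!(uval & TID_MASK))
		return 1;		/* no owner: take over, OWNER_DIED preserved */
	return 0;			/* set FUTEX_WAITERS, then attach_to_pi_owner() */
}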
/**
......@@ -1186,22 +1169,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
return 0;
}
static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
u32 uninitialized_var(oldval);
/*
* There is no waiter, so we unlock the futex. The owner died
* bit has not to be preserved here. We are the owner:
*/
if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
return -EFAULT;
if (oldval != uval)
return -EAGAIN;
return 0;
}
/*
* Express the locking dependencies for lockdep:
*/
......@@ -1659,7 +1626,12 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
goto retry;
goto out;
case -EAGAIN:
/* The owner was exiting, try again. */
/*
* Two reasons for this:
* - Owner is exiting and we just wait for the
* exit to complete.
* - The user space value changed.
*/
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
put_futex_key(&key2);
......@@ -1718,7 +1690,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
this->task, 1);
this->task);
if (ret == 1) {
/* We got the lock. */
requeue_pi_wake_futex(this, &key2, hb2);
......@@ -2316,8 +2288,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
goto uaddr_faulted;
case -EAGAIN:
/*
* Task is exiting and we just wait for the
* exit to complete.
* Two reasons for this:
* - Task is exiting and we just wait for the
* exit to complete.
* - The user space value changed.
*/
queue_unlock(hb);
put_futex_key(&q.key);
......@@ -2337,9 +2311,9 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
/*
* Block on the PI mutex:
*/
if (!trylock)
ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
else {
if (!trylock) {
ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
} else {
ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
......@@ -2401,10 +2375,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
*/
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
struct futex_hash_bucket *hb;
struct futex_q *match;
int ret;
retry:
......@@ -2417,57 +2391,47 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
if (ret)
return ret;
hb = hash_futex(&key);
spin_lock(&hb->lock);
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
* anyone else up. We only try this if neither the waiters nor
* the owner died bit are set.
*/
if (!(uval & ~FUTEX_TID_MASK) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
if (unlikely(uval == vpid))
goto out_unlock;
/*
* Ok, other tasks may need to be woken up - check waiters
* and do the wakeup if necessary:
* Check waiters first. We do not trust user space values at
* all and we at least want to know if user space fiddled
* with the futex value instead of blindly unlocking.
*/
plist_for_each_entry_safe(this, next, &hb->chain, list) {
if (!match_futex (&this->key, &key))
continue;
ret = wake_futex_pi(uaddr, uval, this);
match = futex_top_waiter(hb, &key);
if (match) {
ret = wake_futex_pi(uaddr, uval, match);
/*
* The atomic access to the futex value
* generated a pagefault, so retry the
* user-access and the wakeup:
* The atomic access to the futex value generated a
* pagefault, so retry the user-access and the wakeup:
*/
if (ret == -EFAULT)
goto pi_faulted;
goto out_unlock;
}
/*
* No waiters - kernel unlocks the futex:
* We have no kernel internal state, i.e. no waiters in the
* kernel. Waiters which are about to queue themselves are stuck
* on hb->lock. So we can safely ignore them. We do neither
* preserve the WAITERS bit not the OWNER_DIED one. We are the
* owner.
*/
ret = unlock_futex_pi(uaddr, uval);
if (ret == -EFAULT)
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
goto pi_faulted;
/*
* If uval has changed, let user space handle it.
*/
ret = (curval == uval) ? 0 : -EAGAIN;
out_unlock:
spin_unlock(&hb->lock);
put_futex_key(&key);
out:
return ret;
pi_faulted:
......@@ -2669,7 +2633,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
spin_lock(q.lock_ptr);
......
......@@ -384,7 +384,9 @@ static void print_lockdep_off(const char *bug_msg)
{
printk(KERN_DEBUG "%s\n", bug_msg);
printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}
static int save_trace(struct stack_trace *trace)
......
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"
......@@ -79,7 +77,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
break;
}
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
return next;
......@@ -120,7 +118,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
if (need_resched())
goto unqueue;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
return true;
......@@ -146,7 +144,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
if (smp_load_acquire(&node->locked))
return true;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
/*
* Or we race against a concurrent unqueue()'s step-B, in which
......
......@@ -27,7 +27,7 @@ struct mcs_spinlock {
#define arch_mcs_spin_lock_contended(l) \
do { \
while (!(smp_load_acquire(l))) \
arch_mutex_cpu_relax(); \
cpu_relax_lowlatency(); \
} while (0)
#endif
......@@ -104,7 +104,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
return;
/* Wait until the next pointer is set */
while (!(next = ACCESS_ONCE(node->next)))
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
/* Pass lock to next waiter. */
......
......@@ -46,12 +46,6 @@
# include <asm/mutex.h>
#endif
/*
* A negative mutex count indicates that waiters are sleeping waiting for the
* mutex.
*/
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
......@@ -152,7 +146,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
if (need_resched())
break;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
rcu_read_unlock();
......@@ -388,12 +382,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/*
* Optimistic spinning.
*
* We try to spin for acquisition when we find that there are no
* pending waiters and the lock owner is currently running on a
* (different) CPU.
*
* The rationale is that if the lock owner is running, it is likely to
* release the lock soon.
* We try to spin for acquisition when we find that the lock owner
* is currently running on a (different) CPU and while we don't
* need to reschedule. The rationale is that if the lock owner is
* running, it is likely to release the lock soon.
*
* Since this needs the lock owner, and this mutex implementation
* doesn't track the owner atomically in the lock field, we need to
......@@ -440,7 +432,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
if (owner && !mutex_spin_on_owner(lock, owner))
break;
if ((atomic_read(&lock->count) == 1) &&
/* Try to acquire the mutex if it is unlocked. */
if (!mutex_is_locked(lock) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx) {
......@@ -471,7 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
osq_unlock(&lock->osq);
slowpath:
......@@ -485,8 +478,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
#endif
spin_lock_mutex(&lock->wait_lock, flags);
/* once more, can we acquire the lock? */
if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
/*
* Once more, try to acquire the lock. Only try-lock the mutex if
* it is unlocked to reduce unnecessary xchg() operations.
*/
if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
goto skip_wait;
debug_mutex_lock_common(lock, &waiter);
......@@ -506,9 +502,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* it's unlocked. Later on, if we sleep, this is the
* operation that gives us the lock. We xchg it to -1, so
* that when we release the lock, we properly wake up the
* other waiters:
* other waiters. We only attempt the xchg if the count is
* non-negative in order to avoid unnecessary xchg operations:
*/
if (MUTEX_SHOW_NO_WAITER(lock) &&
if (atomic_read(&lock->count) >= 0 &&
(atomic_xchg(&lock->count, -1) == 1))
break;
......@@ -823,6 +820,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
unsigned long flags;
int prev;
/* No need to trylock if the mutex is locked. */
if (mutex_is_locked(lock))
return 0;
spin_lock_mutex(&lock->wait_lock, flags);
prev = atomic_xchg(&lock->count, -1);
......
......@@ -20,7 +20,6 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/qrwlock.h>
/**
......@@ -35,7 +34,7 @@ static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
while ((cnts & _QW_WMASK) == _QW_LOCKED) {
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
cnts = smp_load_acquire((u32 *)&lock->cnts);
}
}
......@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
* to make sure that the write lock isn't taken.
*/
while (atomic_read(&lock->cnts) & _QW_WMASK)
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
rspin_until_writer_unlock(lock, cnts);
......@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
cnts | _QW_WAITING) == cnts))
break;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
/* When no more readers, set the locked flag */
......@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
_QW_LOCKED) == _QW_WAITING))
break;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
unlock:
arch_spin_unlock(&lock->lock);
......
......@@ -66,12 +66,13 @@ void rt_mutex_debug_task_free(struct task_struct *task)
* the deadlock. We print when we return. act_waiter can be NULL in
* case of a remove waiter operation.
*/
void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *act_waiter,
struct rt_mutex *lock)
{
struct task_struct *task;
if (!debug_locks || detect || !act_waiter)
if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
return;
task = rt_mutex_owner(act_waiter->lock);
......
......@@ -20,14 +20,15 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
struct task_struct *powner);
extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter,
struct rt_mutex *lock);
extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
# define debug_rt_mutex_reset_waiter(w) \
do { (w)->deadlock_lock = NULL; } while (0)
static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
int detect)
static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
enum rtmutex_chainwalk walk)
{
return (waiter != NULL);
}
......
This diff has been collapsed.
......@@ -22,10 +22,15 @@
#define debug_rt_mutex_init(m, n) do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_detect_deadlock(w,d) (d)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)
static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
{
WARN(1, "rtmutex deadlock detected\n");
}
static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
enum rtmutex_chainwalk walk)
{
return walk == RT_MUTEX_FULL_CHAINWALK;
}
......@@ -101,6 +101,21 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}
/*
* Constants for rt mutex functions which have a selectable deadlock
* detection.
*
* RT_MUTEX_MIN_CHAINWALK: Stops the lock chain walk when there are
* no further PI adjustments to be made.
*
* RT_MUTEX_FULL_CHAINWALK: Invoke deadlock detection with a full
* walk of the lock chain.
*/
enum rtmutex_chainwalk {
RT_MUTEX_MIN_CHAINWALK,
RT_MUTEX_FULL_CHAINWALK,
};
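The enum above replaces the old int detect_deadlock flag threaded through the rtmutex API. A self-contained restatement of how the two build variants interpret it, mirroring the debug and non-debug helpers shown elsewhere in this diff; the free-standing detect_deadlock() wrapper name is an assumption:

#include <stdbool.h>

enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

/* Production builds walk the full chain only when the caller asked for it
 * (the futex paths, via rt_mutex_timed_futex_lock(), request it); debug
 * builds force deadlock detection whenever a waiter exists, regardless of
 * the requested walk. */
static inline bool detect_deadlock(enum rtmutex_chainwalk walk, bool have_waiter)
{
#ifdef CONFIG_DEBUG_RT_MUTEXES
	return have_waiter;
#else
	(void)have_waiter;
	return walk == RT_MUTEX_FULL_CHAINWALK;
#endif
}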
/*
* PI-futex support (proxy locking functions, etc.):
*/
......@@ -111,12 +126,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
int detect_deadlock);
struct task_struct *task);
extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter,
int detect_deadlock);
struct rt_mutex_waiter *waiter);
extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
......
......@@ -329,7 +329,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
if (need_resched())
break;
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
rcu_read_unlock();
......@@ -381,7 +381,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
arch_mutex_cpu_relax();
cpu_relax_lowlatency();
}
osq_unlock(&sem->osq);
done:
......
......@@ -835,7 +835,7 @@ config DEBUG_RT_MUTEXES
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
depends on DEBUG_KERNEL && RT_MUTEXES
depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
help
This option enables a rt-mutex tester.
......
#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF
......@@ -29,7 +28,7 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
arch_mutex_cpu_relax(); \
cpu_relax_lowlatency(); \
} \
} while (0)
......