Commit e857b6fc authored by Linus Torvalds

Merge tag 'locking-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Thomas Gleixner:
 "A moderate set of locking updates:

   - A few extensions to the rwsem API and support for opportunistic
     spinning and lock stealing

   - lockdep selftest improvements

   - Documentation updates

   - Cleanups and small fixes all over the place"

* tag 'locking-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  seqlock: kernel-doc: Specify when preemption is automatically altered
  seqlock: Prefix internal seqcount_t-only macros with a "do_"
  Documentation: seqlock: s/LOCKTYPE/LOCKNAME/g
  locking/rwsem: Remove reader optimistic spinning
  locking/rwsem: Enable reader optimistic lock stealing
  locking/rwsem: Prevent potential lock starvation
  locking/rwsem: Pass the current atomic count to rwsem_down_read_slowpath()
  locking/rwsem: Fold __down_{read,write}*()
  locking/rwsem: Introduce rwsem_write_trylock()
  locking/rwsem: Better collate rwsem_read_trylock()
  rwsem: Implement down_read_interruptible
  rwsem: Implement down_read_killable_nested
  refcount: Fix a kernel-doc markup
  completion: Drop init_completion define
  atomic: Update MAINTAINERS
  atomic: Delete obsolete documentation
  seqlock: Rename __seqprop() users
  lockdep/selftest: Add spin_nest_lock test
  lockdep/selftests: Fix PROVE_RAW_LOCK_NESTING
  seqlock: avoid -Wshadow warnings
  ...
This diff is collapsed.
@@ -89,7 +89,7 @@ Read path::
 .. _seqcount_locktype_t:
-Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+Sequence counters with associated locks (``seqcount_LOCKNAME_t``)
 -----------------------------------------------------------------
 As discussed at :ref:`seqcount_t`, sequence count write side critical
@@ -115,27 +115,26 @@ The following sequence counters with associated locks are defined:
 - ``seqcount_mutex_t``
 - ``seqcount_ww_mutex_t``
-The plain seqcount read and write APIs branch out to the specific
-seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel
-API explosion per each new seqcount LOCKTYPE.
+The sequence counter read and write APIs can take either a plain
+seqcount_t or any of the seqcount_LOCKNAME_t variants above.
-Initialization (replace "LOCKTYPE" with one of the supported locks)::
+Initialization (replace "LOCKNAME" with one of the supported locks)::
 	/* dynamic */
-	seqcount_LOCKTYPE_t foo_seqcount;
-	seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+	seqcount_LOCKNAME_t foo_seqcount;
+	seqcount_LOCKNAME_init(&foo_seqcount, &lock);
 	/* static */
-	static seqcount_LOCKTYPE_t foo_seqcount =
-		SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+	static seqcount_LOCKNAME_t foo_seqcount =
+		SEQCNT_LOCKNAME_ZERO(foo_seqcount, &lock);
 	/* C99 struct init */
 	struct {
-		.seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+		.seq = SEQCNT_LOCKNAME_ZERO(foo.seq, &lock),
 	} foo;
 Write path: same as in :ref:`seqcount_t`, while running from a context
-with the associated LOCKTYPE lock acquired.
+with the associated write serialization lock acquired.
 Read path: same as in :ref:`seqcount_t`.
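To make the documented pattern concrete, here is a minimal sketch (not part of the patch; foo_lock, foo_seqcount and foo_value are made-up names) of a spinlock-associated sequence counter with a lockless reader::

	static DEFINE_SPINLOCK(foo_lock);
	static seqcount_spinlock_t foo_seqcount =
		SEQCNT_SPINLOCK_ZERO(foo_seqcount, &foo_lock);
	static u64 foo_value;

	static void foo_update(u64 new_value)
	{
		/* writer: serialize through the associated spinlock */
		spin_lock(&foo_lock);
		write_seqcount_begin(&foo_seqcount);
		foo_value = new_value;
		write_seqcount_end(&foo_seqcount);
		spin_unlock(&foo_lock);
	}

	static u64 foo_read(void)
	{
		unsigned int seq;
		u64 val;

		/* reader: lockless, retry if a writer interleaved */
		do {
			seq = read_seqcount_begin(&foo_seqcount);
			val = foo_value;
		} while (read_seqcount_retry(&foo_seqcount, seq));

		return val;
	}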
......
@@ -2982,6 +2982,8 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	arch/*/include/asm/atomic*.h
 F:	include/*/atomic*.h
+F:	include/linux/refcount.h
+F:	Documentation/atomic_*.txt
 F:	scripts/atomic/
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
......
@@ -28,8 +28,7 @@ struct completion {
 	struct swait_queue_head wait;
 };
-#define init_completion_map(x, m) __init_completion(x)
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
@@ -82,7 +81,7 @@ static inline void complete_release(struct completion *x) {}
 * This inline function will initialize a dynamically created completion
 * structure.
 */
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
 {
 	x->done = 0;
 	init_swait_queue_head(&x->wait);
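For reference, a short sketch of the (unchanged) usage pattern this cleanup preserves; the structure and function names below are invented for illustration:

	struct foo_device {
		struct completion ready;	/* signalled when setup finishes */
	};

	static int foo_wait_ready(struct foo_device *dev)
	{
		int ret;

		init_completion(&dev->ready);	/* dynamic initialization */

		/* ... start asynchronous setup that ends with complete(&dev->ready) ... */

		ret = wait_for_completion_killable(&dev->ready);
		if (ret)			/* fatal signal received while waiting */
			return ret;
		return 0;
	}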
......
@@ -101,7 +101,7 @@
 struct mutex;
 /**
- * struct refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
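As a refresher on the API this kernel-doc describes, a typical refcount_t life cycle looks roughly like this (the object and helper names are invented):

	struct foo {
		refcount_t users;
		/* ... payload ... */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			refcount_set(&f->users, 1);	/* initial reference */
		return f;
	}

	static void foo_get(struct foo *f)
	{
		refcount_inc(&f->users);	/* saturates instead of wrapping on overflow */
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->users))	/* last reference gone */
			kfree(f);
	}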
......
@@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
 * lock for reading
 */
 extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
 extern int __must_check down_read_killable(struct rw_semaphore *sem);
 /*
@@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
 extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
 extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
 # define down_write_nest_lock(sem, nest_lock) down_write(sem)
 # define down_write_nested(sem, subclass) down_write(sem)
 # define down_write_killable_nested(sem, subclass) down_write_killable(sem)
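To show how the new interruptible read-lock entry point above is meant to be called, a sketch (not part of this diff; my_sem and foo_read_data are invented names):

	static DECLARE_RWSEM(my_sem);

	static int foo_read_data(void)
	{
		int ret;

		/* sleep for the lock, but let a signal abort the wait */
		ret = down_read_interruptible(&my_sem);
		if (ret)		/* -EINTR when interrupted */
			return ret;

		/* ... read the data protected by my_sem ... */

		up_read(&my_sem);
		return 0;
	}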
......
@@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 	__seqprop_case((s), mutex, prop), \
 	__seqprop_case((s), ww_mutex, prop))
-#define __seqcount_ptr(s)		__seqprop(s, ptr)
-#define __seqcount_sequence(s)		__seqprop(s, sequence)
-#define __seqcount_lock_preemptible(s)	__seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s)	__seqprop(s, assert)
+#define seqprop_ptr(s)			__seqprop(s, ptr)
+#define seqprop_sequence(s)		__seqprop(s, sequence)
+#define seqprop_preemptible(s)		__seqprop(s, preemptible)
+#define seqprop_assert(s)		__seqprop(s, assert)
 /**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
@@ -328,13 +328,13 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 */
 #define __read_seqcount_begin(s) \
 ({ \
-	unsigned seq; \
+	unsigned __seq; \
 	\
-	while ((seq = __seqcount_sequence(s)) & 1) \
+	while ((__seq = seqprop_sequence(s)) & 1) \
 		cpu_relax(); \
 	\
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
-	seq; \
+	__seq; \
 })
 /**
@@ -345,10 +345,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 */
 #define raw_read_seqcount_begin(s) \
 ({ \
-	unsigned seq = __read_seqcount_begin(s); \
+	unsigned _seq = __read_seqcount_begin(s); \
 	\
 	smp_rmb(); \
-	seq; \
+	_seq; \
 })
 /**
@@ -359,7 +359,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 */
 #define read_seqcount_begin(s) \
 ({ \
-	seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+	seqcount_lockdep_reader_access(seqprop_ptr(s)); \
 	raw_read_seqcount_begin(s); \
 })
@@ -376,11 +376,11 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 */
 #define raw_read_seqcount(s) \
 ({ \
-	unsigned seq = __seqcount_sequence(s); \
+	unsigned __seq = seqprop_sequence(s); \
 	\
 	smp_rmb(); \
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
-	seq; \
+	__seq; \
 })
 /**
@@ -425,9 +425,9 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
 * Return: true if a read section retry is required, else false
 */
 #define __read_seqcount_retry(s, start) \
-	__read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do___read_seqcount_retry(seqprop_ptr(s), start)
-static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -445,27 +445,29 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 * Return: true if a read section retry is required, else false
 */
 #define read_seqcount_retry(s, start) \
-	read_seqcount_t_retry(__seqcount_ptr(s), start)
+	do_read_seqcount_retry(seqprop_ptr(s), start)
-static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_t_retry(s, start);
+	return do___read_seqcount_retry(s, start);
 }
 /**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_begin()
 */
 #define raw_write_seqcount_begin(s) \
 do { \
-	if (__seqcount_lock_preemptible(s)) \
+	if (seqprop_preemptible(s)) \
 		preempt_disable(); \
 	\
-	raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+	do_raw_write_seqcount_begin(seqprop_ptr(s)); \
 } while (0)
-static inline void raw_write_seqcount_t_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -475,16 +477,18 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
 /**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_end()
 */
 #define raw_write_seqcount_end(s) \
 do { \
-	raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+	do_raw_write_seqcount_end(seqprop_ptr(s)); \
 	\
-	if (__seqcount_lock_preemptible(s)) \
+	if (seqprop_preemptible(s)) \
 		preempt_enable(); \
 } while (0)
-static inline void raw_write_seqcount_t_end(seqcount_t *s)
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
@@ -498,20 +502,21 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
+ * Context: check write_seqcount_begin()
 */
 #define write_seqcount_begin_nested(s, subclass) \
 do { \
-	__seqcount_assert_lock_held(s); \
+	seqprop_assert(s); \
 	\
-	if (__seqcount_lock_preemptible(s)) \
+	if (seqprop_preemptible(s)) \
 		preempt_disable(); \
 	\
-	write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
 } while (0)
-static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	raw_write_seqcount_t_begin(s);
+	do_raw_write_seqcount_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
@@ -519,46 +524,46 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
- * write_seqcount_begin opens a write side critical section of the given
- * seqcount_t.
- *
- * Context: seqcount_t write side critical sections must be serialized and
- * non-preemptible. If readers can be invoked from hardirq or softirq
+ * Context: sequence counter write side sections must be serialized and
+ * non-preemptible. Preemption will be automatically disabled if and
+ * only if the seqcount write serialization lock is associated, and
+ * preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
 #define write_seqcount_begin(s) \
 do { \
-	__seqcount_assert_lock_held(s); \
+	seqprop_assert(s); \
 	\
-	if (__seqcount_lock_preemptible(s)) \
+	if (seqprop_preemptible(s)) \
 		preempt_disable(); \
 	\
-	write_seqcount_t_begin(__seqcount_ptr(s)); \
+	do_write_seqcount_begin(seqprop_ptr(s)); \
 } while (0)
-static inline void write_seqcount_t_begin(seqcount_t *s)
+static inline void do_write_seqcount_begin(seqcount_t *s)
 {
-	write_seqcount_t_begin_nested(s, 0);
+	do_write_seqcount_begin_nested(s, 0);
 }
 /**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
- * The write section must've been opened with write_seqcount_begin().
+ * Context: Preemption will be automatically re-enabled if and only if
+ * the seqcount write serialization lock is associated, and preemptible.
 */
 #define write_seqcount_end(s) \
 do { \
-	write_seqcount_t_end(__seqcount_ptr(s)); \
+	do_write_seqcount_end(seqprop_ptr(s)); \
 	\
-	if (__seqcount_lock_preemptible(s)) \
+	if (seqprop_preemptible(s)) \
 		preempt_enable(); \
 } while (0)
-static inline void write_seqcount_t_end(seqcount_t *s)
+static inline void do_write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_t_end(s);
+	do_raw_write_seqcount_end(s);
 }
 /**
@@ -603,9 +608,9 @@ static inline void write_seqcount_t_end(seqcount_t *s)
 * }
 */
 #define raw_write_seqcount_barrier(s) \
-	raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+	do_raw_write_seqcount_barrier(seqprop_ptr(s))
-static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -623,9 +628,9 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
 * will complete successfully and see data older than this.
 */
 #define write_seqcount_invalidate(s) \
-	write_seqcount_t_invalidate(__seqcount_ptr(s))
+	do_write_seqcount_invalidate(seqprop_ptr(s))
-static inline void write_seqcount_t_invalidate(seqcount_t *s)
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -865,9 +870,9 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 }
 /*
- * For all seqlock_t write side functions, use write_seqcount_*t*_begin()
- * instead of the generic write_seqcount_begin(). This way, no redundant
- * lockdep_assert_held() checks are added.
+ * For all seqlock_t write side functions, use the the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
 */
 /**
@@ -886,7 +891,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 /**
@@ -898,7 +903,7 @@ static inline void write_seqlock(seqlock_t *sl)
 */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
@@ -912,7 +917,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 /**
@@ -925,7 +930,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
 */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
@@ -939,7 +944,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
 /**
@@ -951,7 +956,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
 */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }
@@ -960,7 +965,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_t_begin(&sl->seqcount.seqcount);
+	do_write_seqcount_begin(&sl->seqcount.seqcount);
 	return flags;
 }
@@ -989,7 +994,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_t_end(&sl->seqcount.seqcount);
+	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
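As background on the seqlock_t write-side helpers adjusted above, their canonical usage is roughly the following (a sketch only; ts_lock and last_timestamp are invented names):

	static DEFINE_SEQLOCK(ts_lock);
	static u64 last_timestamp;

	static void ts_update(u64 now)
	{
		write_seqlock(&ts_lock);	/* takes the spinlock and bumps the count */
		last_timestamp = now;
		write_sequnlock(&ts_lock);
	}

	static u64 ts_read(void)
	{
		unsigned int seq;
		u64 val;

		do {
			seq = read_seqbegin(&ts_lock);
			val = last_timestamp;
		} while (read_seqretry(&ts_lock, seq));

		return val;
	}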
......
@@ -310,8 +310,6 @@ static inline bool should_fail_futex(bool fshared)
 #ifdef CONFIG_COMPAT
 static void compat_exit_robust_list(struct task_struct *curr);
-#else
-static inline void compat_exit_robust_list(struct task_struct *curr) { }
 #endif
 /*
......
@@ -56,13 +56,11 @@ LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps */
 LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps */
 LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups */
 LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups */
-LOCK_EVENT(rwsem_opt_rlock)	/* # of opt-acquired read locks */
-LOCK_EVENT(rwsem_opt_wlock)	/* # of opt-acquired write locks */
+LOCK_EVENT(rwsem_opt_lock)	/* # of opt-acquired write locks */
 LOCK_EVENT(rwsem_opt_fail)	/* # of failed optspins */
 LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins */
-LOCK_EVENT(rwsem_opt_norspin)	/* # of disabled reader-only optspins */
-LOCK_EVENT(rwsem_opt_rlock2)	/* # of opt-acquired 2ndary read locks */
 LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired */
+LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks by lock stealing */
 LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired */
 LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions */
 LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs */
......
This diff is collapsed.
@@ -58,10 +58,10 @@ static struct ww_mutex o, o2, o3;
 * Normal standalone locks, for the circular and irq-context
 * dependency tests:
 */
-static DEFINE_RAW_SPINLOCK(lock_A);
-static DEFINE_RAW_SPINLOCK(lock_B);
-static DEFINE_RAW_SPINLOCK(lock_C);
-static DEFINE_RAW_SPINLOCK(lock_D);
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
@@ -93,12 +93,12 @@ static DEFINE_RT_MUTEX(rtmutex_D);
 * but X* and Y* are different classes. We do this so that
 * we do not trigger a real lockup:
 */
-static DEFINE_RAW_SPINLOCK(lock_X1);
-static DEFINE_RAW_SPINLOCK(lock_X2);
-static DEFINE_RAW_SPINLOCK(lock_Y1);
-static DEFINE_RAW_SPINLOCK(lock_Y2);
-static DEFINE_RAW_SPINLOCK(lock_Z1);
-static DEFINE_RAW_SPINLOCK(lock_Z2);
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
 static DEFINE_RWLOCK(rwlock_X1);
 static DEFINE_RWLOCK(rwlock_X2);
@@ -138,10 +138,10 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
 */
 #define INIT_CLASS_FUNC(class) \
 static noinline void \
-init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
 	struct mutex *mutex, struct rw_semaphore *rwsem)\
 { \
-	raw_spin_lock_init(lock); \
+	spin_lock_init(lock); \
 	rwlock_init(rwlock); \
 	mutex_init(mutex); \
 	init_rwsem(rwsem); \
@@ -210,10 +210,10 @@ static void init_shared_classes(void)
 * Shortcuts for lock/unlock API variants, to keep
 * the testcases compact:
 */
-#define L(x)	raw_spin_lock(&lock_##x)
-#define U(x)	raw_spin_unlock(&lock_##x)
+#define L(x)	spin_lock(&lock_##x)
+#define U(x)	spin_unlock(&lock_##x)
 #define LU(x)	L(x); U(x)
-#define SI(x)	raw_spin_lock_init(&lock_##x)
+#define SI(x)	spin_lock_init(&lock_##x)
 #define WL(x)	write_lock(&rwlock_##x)
 #define WU(x)	write_unlock(&rwlock_##x)
@@ -1341,7 +1341,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 #define I2(x) \
 	do { \
-		raw_spin_lock_init(&lock_##x); \
+		spin_lock_init(&lock_##x); \
 		rwlock_init(&rwlock_##x); \
 		mutex_init(&mutex_##x); \
 		init_rwsem(&rwsem_##x); \
@@ -2005,10 +2005,23 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
 static void ww_test_spin_nest_unlocked(void)
 {
-	raw_spin_lock_nest_lock(&lock_A, &o.base);
+	spin_lock_nest_lock(&lock_A, &o.base);
 	U(A);
 }
+/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
+static void ww_test_spin_nest_lock(void)
+{
+	spin_lock(&lock_X1);
+	spin_lock_nest_lock(&lock_Y1, &lock_X1);
+	spin_lock(&lock_A);
+	spin_lock_nest_lock(&lock_Y2, &lock_X1);
+	spin_unlock(&lock_A);
+	spin_unlock(&lock_Y2);
+	spin_unlock(&lock_Y1);
+	spin_unlock(&lock_X1);
+}
 static void ww_test_unneeded_slow(void)
 {
 	WWAI(&t);
@@ -2226,6 +2239,10 @@ static void ww_tests(void)
 	dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
 	pr_cont("\n");
+	print_testname("spinlock nest test");
+	dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
+	pr_cont("\n");
 	printk(" -----------------------------------------------------\n");
 	printk(" |block | try |context|\n");
 	printk(" -----------------------------------------------------\n");
......