Commit e966eaee authored by Ingo Molnar

locking/lockdep: Remove the cross-release locking checks

This code (CONFIG_LOCKDEP_CROSSRELEASE=y and CONFIG_LOCKDEP_COMPLETIONS=y),
while it found a number of old bugs initially, was also causing too many
false positives that caused people to disable lockdep - which is arguably
a worse overall outcome.
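For illustration (not part of the commit message), here is a minimal sketch, with hypothetical names, of the completion-deadlock pattern the cross-release checks were designed to catch. Classic lockdep cannot see it, because the completion is "released" by a different context than the one that waits on it:

/* Hypothetical example, not from this patch. Task A blocks in
 * wait_for_completion() while holding dev->lock; task B must take
 * dev->lock before it can call complete(). Neither task can make
 * progress, yet no single task acquires two locks in a cycle, so
 * plain lockdep stays silent. Cross-release modeled the completion
 * as a lock "acquired" in A and "released" in B to close this gap.
 */
#include <linux/completion.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	struct completion ready;
};

static void task_a(struct my_dev *dev)
{
	mutex_lock(&dev->lock);
	wait_for_completion(&dev->ready);	/* sleeps forever */
	mutex_unlock(&dev->lock);
}

static void task_b(struct my_dev *dev)
{
	mutex_lock(&dev->lock);			/* blocks on task A */
	complete(&dev->ready);
	mutex_unlock(&dev->lock);
}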

If we disable cross-release by default but keep the code upstream then
in practice the most likely outcome is that we'll allow the situation
to degrade gradually, by allowing entropy to introduce more and more
false positives, until it overwhelms maintenance capacity.

Another bad side effect was that people were trying to work around
the false positives by uglifying/complicating unrelated code. There's
a marked difference between annotating locking operations and
uglifying good code just due to bad lock debugging code ...

This gradual decrease in quality happened to a number of debugging
facilities in the kernel, and lockdep is pretty complex already,
so we cannot risk this outcome.

Either cross-release checking can be done right with no false positives,
or it should not be included in the upstream kernel.

( Note that it might make sense to maintain it out of tree and go through
  the false positives every now and then and see whether new bugs were
  introduced. )

Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: d89c7035
[This diff is collapsed.]
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,9 +10,6 @@
  */

 #include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif

 /*
  * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
 struct completion {
 	unsigned int done;
 	wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-	struct lockdep_map_cross map;
-#endif
 };

-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
-	lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
-	lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
-	lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion_map(x, m)					\
-do {									\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			(m)->name, (m)->key, 0);			\
-	__init_completion(x);						\
-} while (0)
-
-#define init_completion(x)						\
-do {									\
-	static struct lock_class_key __key;				\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			"(completion)" #x,				\
-			&__key, 0);					\
-	__init_completion(x);						\
-} while (0)
-#else
 #define init_completion_map(x, m) __init_completion(x)
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
 static inline void complete_release_commit(struct completion *x) {}
-#endif

-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#define COMPLETION_INITIALIZER(work) \
-	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
-	STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
-#else
 #define COMPLETION_INITIALIZER(work) \
 	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif

 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
 	(*({ init_completion_map(&(work), &(map)); &(work); }))
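Note that the header keeps init_completion_map() and COMPLETION_INITIALIZER_ONSTACK_MAP() as no-op entry points, so annotated callers keep compiling. A hedged sketch of such a caller (hypothetical names; with this patch the map argument is simply discarded):

/* Hypothetical caller of the surviving annotation API. Before this
 * patch the map gave the completion its own lockdep class; after it,
 * init_completion_map() is defined as __init_completion() and the
 * map argument is ignored.
 */
#include <linux/completion.h>
#include <linux/lockdep.h>

static struct lock_class_key io_done_key;
static struct lockdep_map io_done_map =
	STATIC_LOCKDEP_MAP_INIT("io_done", &io_done_key);

static void wait_for_io(void)
{
	struct completion done;

	init_completion_map(&done, &io_done_map);
	/* ... kick off I/O that eventually calls complete(&done) ... */
	wait_for_completion(&done);
}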
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
 	int				cpu;
 	unsigned long			ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Whether it's a crosslock.
-	 */
-	int				cross;
-#endif
 };

 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;					/* 32 bits */
 	unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Generation id.
-	 *
-	 * A value of cross_gen_id will be stored when holding this,
-	 * which is globally increased whenever each crosslock is held.
-	 */
-	unsigned int gen_id;
-#endif
 };

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 *           |<----------- hist_lock ring buffer size ------->|
-	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 *           where 'p' represents an acquisition in process
-	 *           context, 'i' represents an acquisition in irq
-	 *           context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
-	 * Seperate stack_trace data. This will be used at commit step.
-	 */
-	struct stack_trace	trace;
-	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
-};
-
-/*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-	/*
-	 * When more than one acquisition of crosslocks are overlapped,
-	 * we have to perform commit for them based on cross_gen_id of
-	 * the first acquisition, which allows us to add more true
-	 * dependencies.
-	 *
-	 * Moreover, when no acquisition of a crosslock is in progress,
-	 * we should not perform commit because the lock might not exist
-	 * any more, which might cause incorrect memory access. So we
-	 * have to track the number of acquisitions of a crosslock.
-	 */
-	int nr_acquire;
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock	hlock;
-};
-
-struct lockdep_map_cross {
-	struct lockdep_map map;
-	struct cross_lock xlock;
-};
-#endif

 /*
  * Initialization, self-test and debugging-output methods:
  */
@@ -560,37 +467,6 @@ enum xhlock_context_t {
 	XHLOCK_CTX_NR,
 };

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-				       const char *name,
-				       struct lock_class_key *key,
-				       int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-	{ .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .map.name = (_name), .map.key = (void *)(_key), \
-	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */

 #ifdef CONFIG_LOCK_STAT
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
 #endif

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCKS_NR 64UL
-	struct hist_lock *xhlocks; /* Crossrelease history locks */
-	unsigned int xhlock_idx;
-	/* For restoring at history boundaries */
-	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_CTX_NR];
-#endif
-
 #ifdef CONFIG_UBSAN
 	unsigned int			in_ubsan;
 #endif
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -57,10 +57,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#include <linux/slab.h>
-#endif
-
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif

-#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-static int crossrelease_fullstack = 1;
-#else
-static int crossrelease_fullstack;
-#endif
-
-static int __init allow_crossrelease_fullstack(char *str)
-{
-	crossrelease_fullstack = 1;
-	return 0;
-}
-
-early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
-
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
@@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-static void cross_init(struct lockdep_map *lock, int cross);
-static int cross_lock(struct lockdep_map *lock);
-static int lock_acquire_crosslock(struct held_lock *hlock);
-static int lock_release_crosslock(struct lockdep_map *lock);
-#else
-static inline void cross_init(struct lockdep_map *lock, int cross) {}
-static inline int cross_lock(struct lockdep_map *lock) { return 0; }
-static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
-static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
-#endif
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,24 +1122,6 @@ print_circular_lock_scenario(struct held_lock *src,
 		printk(KERN_CONT "\n\n");
 	}

-	if (cross_lock(tgt->instance)) {
-		printk(" Possible unsafe locking scenario by crosslock:\n\n");
-		printk("       CPU0                    CPU1\n");
-		printk("       ----                    ----\n");
-		printk("       lock(");
-		__print_lock_name(parent);
-		printk(KERN_CONT ");\n");
-		printk("       lock(");
-		__print_lock_name(target);
-		printk(KERN_CONT ");\n");
-		printk("       lock(");
-		__print_lock_name(source);
-		printk(KERN_CONT ");\n");
-		printk("       unlock(");
-		__print_lock_name(target);
-		printk(KERN_CONT ");\n");
-		printk("\n *** DEADLOCK ***\n\n");
-	} else {
 	printk(" Possible unsafe locking scenario:\n\n");
 	printk("       CPU0                    CPU1\n");
 	printk("       ----                    ----\n");
@@ -1185,7 +1138,6 @@ print_circular_lock_scenario(struct held_lock *src,
 	__print_lock_name(source);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
-	}
 }

 /*
@@ -1211,9 +1163,6 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 		curr->comm, task_pid_nr(curr));
 	print_lock(check_src);
-	if (cross_lock(check_tgt->instance))
-		pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
-	else
 	pr_warn("\nbut task is already holding lock:\n");

 	print_lock(check_tgt);
@@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;

-	if (cross_lock(check_tgt->instance))
-		this->trace = *trace;
-	else if (!save_trace(&this->trace))
+	if (!save_trace(&this->trace))
 		return 0;

 	depth = get_lock_depth(target);
@@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 		if (nest)
 			return 2;

-		if (cross_lock(prev->instance))
-			continue;
-
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -2018,18 +1962,13 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	for (;;) {
 		int distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth - 1;
-		/*
-		 * Only non-crosslock entries get new dependencies added.
-		 * Crosslock entries will be added by commit later:
-		 */
-		if (!cross_lock(hlock->instance)) {
-			/*
-			 * Only non-recursive-read entries get new dependencies
-			 * added:
-			 */
-			if (hlock->read != 2 && hlock->check) {
-				int ret = check_prev_add(curr, hlock, next,
-							 distance, &trace, save_trace);
-				if (!ret)
-					return 0;
+
+		/*
+		 * Only non-recursive-read entries get new dependencies
+		 * added:
+		 */
+		if (hlock->read != 2 && hlock->check) {
+			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+			if (!ret)
+				return 0;
@@ -2042,7 +1981,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (!hlock->trylock)
 			break;
 	}
-	}
+
 	depth--;
 	/*
 	 * End of lock-stack?
@@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	cross_init(lock, 0);
 	__lockdep_init_map(lock, name, key, subclass);
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);

-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key, int subclass)
-{
-	cross_init(lock, 1);
-	__lockdep_init_map(lock, name, key, subclass);
-}
-EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
-#endif
-
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
@@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	int chain_head = 0;
 	int class_idx;
 	u64 chain_key;
-	int ret;

 	if (unlikely(!debug_locks))
 		return 0;
@@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	class_idx = class - lock_classes + 1;

-	/* TODO: nest_lock is not implemented for crosslock yet. */
-	if (depth && !cross_lock(lock)) {
+	if (depth) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
 			if (hlock->references) {
@@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;

-	ret = lock_acquire_crosslock(hlock);
-	/*
-	 * 2 means normal acquire operations are needed. Otherwise, it's
-	 * ok just to return with '0:fail, 1:success'.
-	 */
-	if (ret != 2)
-		return ret;
-
 	curr->curr_chain_key = chain_key;
 	curr->lockdep_depth++;
 	check_chain_key(curr);
@@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned int depth;
-	int ret, i;
+	int i;

 	if (unlikely(!debug_locks))
 		return 0;

-	ret = lock_release_crosslock(lock);
-	/*
-	 * 2 means normal release operations are needed. Otherwise, it's
-	 * ok just to return with '0:fail, 1:success'.
-	 */
-	if (ret != 2)
-		return ret;
-
 	depth = curr->lockdep_depth;
 	/*
 	 * So we're all set to release this lock.. wait what lock? We don't
@@ -4675,495 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-
-/*
- * Crossrelease works by recording a lock history for each thread and
- * connecting those historic locks that were taken after the
- * wait_for_completion() in the complete() context.
- *
- * Task-A				Task-B
- *
- * mutex_lock(&A);
- * mutex_unlock(&A);
- *
- * wait_for_completion(&C);
- *   lock_acquire_crosslock();
- *     atomic_inc_return(&cross_gen_id);
- *                                |
- *				  |	mutex_lock(&B);
- *				  |	mutex_unlock(&B);
- *                                |
- *				  |	complete(&C);
- *				  `--	  lock_commit_crosslock();
- *
- * Which will then add a dependency between B and C.
- */
-
-#define xhlock(i)         (current->xhlocks[(i) % MAX_XHLOCKS_NR])
-
-/*
- * Whenever a crosslock is held, cross_gen_id will be increased.
- */
-static atomic_t cross_gen_id; /* Can be wrapped */
-
-/*
- * Make an entry of the ring buffer invalid.
- */
-static inline void invalidate_xhlock(struct hist_lock *xhlock)
-{
-	/*
-	 * Normally, xhlock->hlock.instance must be !NULL.
-	 */
-	xhlock->hlock.instance = NULL;
-}
-
-/*
- * Lock history stacks; we have 2 nested lock history stacks:
- *
- *    HARD(IRQ)
- *    SOFT(IRQ)
- *
- * The thing is that once we complete a HARD/SOFT IRQ the future task locks
- * should not depend on any of the locks observed while running the IRQ. So
- * what we do is rewind the history buffer and erase all our knowledge of that
- * temporal event.
- */
-
-void crossrelease_hist_start(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (!cur->xhlocks)
-		return;
-
-	cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-	cur->hist_id_save[c]    = cur->hist_id;
-}
-
-void crossrelease_hist_end(enum xhlock_context_t c)
-{
-	struct task_struct *cur = current;
-
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
-}
-
-/*
- * lockdep_invariant_state() is used to annotate independence inside a task, to
- * make one task look like multiple independent 'tasks'.
- *
- * Take for instance workqueues; each work is independent of the last. The
- * completion of a future work does not depend on the completion of a past work
- * (in general). Therefore we must not carry that (lock) dependency across
- * works.
- *
- * This is true for many things; pretty much all kthreads fall into this
- * pattern, where they have an invariant state and future completions do not
- * depend on past completions. Its just that since they all have the 'same'
- * form -- the kthread does the same over and over -- it doesn't typically
- * matter.
- *
- * The same is true for system-calls, once a system call is completed (we've
- * returned to userspace) the next system call does not depend on the lock
- * history of the previous system call.
- *
- * They key property for independence, this invariant state, is that it must be
- * a point where we hold no locks and have no history. Because if we were to
- * hold locks, the restore at _end() would not necessarily recover it's history
- * entry. Similarly, independence per-definition means it does not depend on
- * prior state.
- */
-void lockdep_invariant_state(bool force)
-{
-	/*
-	 * We call this at an invariant point, no current state, no history.
-	 * Verify the former, enforce the latter.
-	 */
-	WARN_ON_ONCE(!force && current->lockdep_depth);
-
-	if (current->xhlocks)
-		invalidate_xhlock(&xhlock(current->xhlock_idx));
-}
-
-static int cross_lock(struct lockdep_map *lock)
-{
-	return lock ? lock->cross : 0;
-}
-
-/*
- * This is needed to decide the relationship between wrapable variables.
- */
-static inline int before(unsigned int a, unsigned int b)
-{
-	return (int)(a - b) < 0;
-}
-
-static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
-{
-	return hlock_class(&xhlock->hlock);
-}
-
-static inline struct lock_class *xlock_class(struct cross_lock *xlock)
-{
-	return hlock_class(&xlock->hlock);
-}
-
-/*
- * Should we check a dependency with previous one?
- */
-static inline int depend_before(struct held_lock *hlock)
-{
-	return hlock->read != 2 && hlock->check && !hlock->trylock;
-}
-
-/*
- * Should we check a dependency with next one?
- */
-static inline int depend_after(struct held_lock *hlock)
-{
-	return hlock->read != 2 && hlock->check;
-}
-
-/*
- * Check if the xhlock is valid, which would be false if,
- *
- *    1. Has not used after initializaion yet.
- *    2. Got invalidated.
- *
- * Remind hist_lock is implemented as a ring buffer.
- */
-static inline int xhlock_valid(struct hist_lock *xhlock)
-{
-	/*
-	 * xhlock->hlock.instance must be !NULL.
-	 */
-	return !!xhlock->hlock.instance;
-}
-
-/*
- * Record a hist_lock entry.
- *
- * Irq disable is only required.
- */
-static void add_xhlock(struct held_lock *hlock)
-{
-	unsigned int idx = ++current->xhlock_idx;
-	struct hist_lock *xhlock = &xhlock(idx);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * This can be done locklessly because they are all task-local
-	 * state, we must however ensure IRQs are disabled.
-	 */
-	WARN_ON_ONCE(!irqs_disabled());
-#endif
-
-	/* Initialize hist_lock's members */
-	xhlock->hlock = *hlock;
-	xhlock->hist_id = ++current->hist_id;
-
-	xhlock->trace.nr_entries = 0;
-	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
-	xhlock->trace.entries = xhlock->trace_entries;
-
-	if (crossrelease_fullstack) {
-		xhlock->trace.skip = 3;
-		save_stack_trace(&xhlock->trace);
-	} else {
-		xhlock->trace.nr_entries = 1;
-		xhlock->trace.entries[0] = hlock->acquire_ip;
-	}
-}
-
-static inline int same_context_xhlock(struct hist_lock *xhlock)
-{
-	return xhlock->hlock.irq_context == task_irq_context(current);
-}
-
-/*
- * This should be lockless as far as possible because this would be
- * called very frequently.
- */
-static void check_add_xhlock(struct held_lock *hlock)
-{
-	/*
-	 * Record a hist_lock, only in case that acquisitions ahead
-	 * could depend on the held_lock. For example, if the held_lock
-	 * is trylock then acquisitions ahead never depends on that.
-	 * In that case, we don't need to record it. Just return.
-	 */
-	if (!current->xhlocks || !depend_before(hlock))
-		return;
-
-	add_xhlock(hlock);
-}
-
-/*
- * For crosslock.
- */
-static int add_xlock(struct held_lock *hlock)
-{
-	struct cross_lock *xlock;
-	unsigned int gen_id;
-
-	if (!graph_lock())
-		return 0;
-
-	xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
-
-	/*
-	 * When acquisitions for a crosslock are overlapped, we use
-	 * nr_acquire to perform commit for them, based on cross_gen_id
-	 * of the first acquisition, which allows to add additional
-	 * dependencies.
-	 *
-	 * Moreover, when no acquisition of a crosslock is in progress,
-	 * we should not perform commit because the lock might not exist
-	 * any more, which might cause incorrect memory access. So we
-	 * have to track the number of acquisitions of a crosslock.
-	 *
-	 * depend_after() is necessary to initialize only the first
-	 * valid xlock so that the xlock can be used on its commit.
-	 */
-	if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
-		goto unlock;
-
-	gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
-	xlock->hlock = *hlock;
-	xlock->hlock.gen_id = gen_id;
-unlock:
-	graph_unlock();
-	return 1;
-}
-
-/*
- * Called for both normal and crosslock acquires. Normal locks will be
- * pushed on the hist_lock queue. Cross locks will record state and
- * stop regular lock_acquire() to avoid being placed on the held_lock
- * stack.
- *
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_acquire_crosslock(struct held_lock *hlock)
-{
-	/*
-	 *	CONTEXT 1		CONTEXT 2
-	 *	---------		---------
-	 *	lock A (cross)
-	 *	X = atomic_inc_return(&cross_gen_id)
-	 *	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-	 *				Y = atomic_read_acquire(&cross_gen_id)
-	 *				lock B
-	 *
-	 * atomic_read_acquire() is for ordering between A and B,
-	 * IOW, A happens before B, when CONTEXT 2 see Y >= X.
-	 *
-	 * Pairs with atomic_inc_return() in add_xlock().
-	 */
-	hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
-
-	if (cross_lock(hlock->instance))
-		return add_xlock(hlock);
-
-	check_add_xhlock(hlock);
-	return 2;
-}
-
-static int copy_trace(struct stack_trace *trace)
-{
-	unsigned long *buf = stack_trace + nr_stack_trace_entries;
-	unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	unsigned int nr = min(max_nr, trace->nr_entries);
-
-	trace->nr_entries = nr;
-	memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
-	trace->entries = buf;
-	nr_stack_trace_entries += nr;
-
-	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
-		if (!debug_locks_off_graph_unlock())
-			return 0;
-
-		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
-		dump_stack();
-
-		return 0;
-	}
-
-	return 1;
-}
-
-static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
-{
-	unsigned int xid, pid;
-	u64 chain_key;
-
-	xid = xlock_class(xlock) - lock_classes;
-	chain_key = iterate_chain_key((u64)0, xid);
-	pid = xhlock_class(xhlock) - lock_classes;
-	chain_key = iterate_chain_key(chain_key, pid);
-
-	if (lookup_chain_cache(chain_key))
-		return 1;
-
-	if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
-				chain_key))
-		return 0;
-
-	if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
-			    &xhlock->trace, copy_trace))
-		return 0;
-
-	return 1;
-}
-
-static void commit_xhlocks(struct cross_lock *xlock)
-{
-	unsigned int cur = current->xhlock_idx;
-	unsigned int prev_hist_id = xhlock(cur).hist_id;
-	unsigned int i;
-
-	if (!graph_lock())
-		return;
-
-	if (xlock->nr_acquire) {
-		for (i = 0; i < MAX_XHLOCKS_NR; i++) {
-			struct hist_lock *xhlock = &xhlock(cur - i);
-
-			if (!xhlock_valid(xhlock))
-				break;
-
-			if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
-				break;
-
-			if (!same_context_xhlock(xhlock))
-				break;
-
-			/*
-			 * Filter out the cases where the ring buffer was
-			 * overwritten and the current entry has a bigger
-			 * hist_id than the previous one, which is impossible
-			 * otherwise:
-			 */
-			if (unlikely(before(prev_hist_id, xhlock->hist_id)))
-				break;
-
-			prev_hist_id = xhlock->hist_id;
-
-			/*
-			 * commit_xhlock() returns 0 with graph_lock already
-			 * released if fail.
-			 */
-			if (!commit_xhlock(xlock, xhlock))
-				return;
-		}
-	}
-
-	graph_unlock();
-}
-
-void lock_commit_crosslock(struct lockdep_map *lock)
-{
-	struct cross_lock *xlock;
-	unsigned long flags;
-
-	if (unlikely(!debug_locks || current->lockdep_recursion))
-		return;
-
-	if (!current->xhlocks)
-		return;
-
-	/*
-	 * Do commit hist_locks with the cross_lock, only in case that
-	 * the cross_lock could depend on acquisitions after that.
-	 *
-	 * For example, if the cross_lock does not have the 'check' flag
-	 * then we don't need to check dependencies and commit for that.
-	 * Just skip it. In that case, of course, the cross_lock does
-	 * not depend on acquisitions ahead, either.
-	 *
-	 * WARNING: Don't do that in add_xlock() in advance. When an
-	 * acquisition context is different from the commit context,
-	 * invalid(skipped) cross_lock might be accessed.
-	 */
-	if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
-		return;
-
-	raw_local_irq_save(flags);
-	check_flags(flags);
-	current->lockdep_recursion = 1;
-	xlock = &((struct lockdep_map_cross *)lock)->xlock;
-	commit_xhlocks(xlock);
-	current->lockdep_recursion = 0;
-	raw_local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(lock_commit_crosslock);
-
-/*
- * Return: 0 - failure;
- *         1 - crosslock, done;
- *         2 - normal lock, continue to held_lock[] ops.
- */
-static int lock_release_crosslock(struct lockdep_map *lock)
-{
-	if (cross_lock(lock)) {
-		if (!graph_lock())
-			return 0;
-		((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
-		graph_unlock();
-		return 1;
-	}
-	return 2;
-}
-
-static void cross_init(struct lockdep_map *lock, int cross)
-{
-	if (cross)
-		((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
-
-	lock->cross = cross;
-
-	/*
-	 * Crossrelease assumes that the ring buffer size of xhlocks
-	 * is aligned with power of 2. So force it on build.
-	 */
-	BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
-}
-
-void lockdep_init_task(struct task_struct *task)
-{
-	int i;
-
-	task->xhlock_idx = UINT_MAX;
-	task->hist_id = 0;
-
-	for (i = 0; i < XHLOCK_CTX_NR; i++) {
-		task->xhlock_idx_hist[i] = UINT_MAX;
-		task->hist_id_save[i] = 0;
-	}
-
-	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
-				GFP_KERNEL);
-}
-
-void lockdep_free_task(struct task_struct *task)
-{
-	if (task->xhlocks) {
-		void *tmp = task->xhlocks;
-		/* Diable crossrelease for current */
-		task->xhlocks = NULL;
-		kfree(tmp);
-	}
-}
-#endif
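One removed helper is worth a note: before() compares ring-buffer generation ids so that ordering stays correct across unsigned wraparound. A standalone sketch of the idiom (userspace C, for illustration only, not part of the patch):

/* Why before() uses signed subtraction: with unsigned wraparound, a
 * plain '<' misorders ids that straddle UINT_MAX, while
 * (int)(a - b) < 0 stays correct as long as the two ids are less
 * than 2^31 apart.
 */
#include <stdio.h>

static int before(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

int main(void)
{
	unsigned int a = 0xfffffffe;	/* just before the wrap */
	unsigned int b = a + 3;		/* wrapped around to 1 */

	printf("a < b: %d, before(a, b): %d\n", a < b, before(a, b));
	/* prints "a < b: 0, before(a, b): 1" */
	return 0;
}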
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
-	select LOCKDEP_CROSSRELEASE
-	select LOCKDEP_COMPLETIONS
 	select TRACE_IRQFLAGS
 	default n
 	help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
 	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
 	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)

-config LOCKDEP_CROSSRELEASE
-	bool
-	help
-	 This makes lockdep work for crosslock which is a lock allowed to
-	 be released in a different context from the acquisition context.
-	 Normally a lock must be released in the context acquiring the lock.
-	 However, relexing this constraint helps synchronization primitives
-	 such as page locks or completions can use the lock correctness
-	 detector, lockdep.
-
-config LOCKDEP_COMPLETIONS
-	bool
-	help
-	 A deadlock caused by wait_for_completion() and complete() can be
-	 detected by lockdep using crossrelease feature.
-
-config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-	bool "Enable the boot parameter, crossrelease_fullstack"
-	depends on LOCKDEP_CROSSRELEASE
-	default n
-	help
-	 The lockdep "cross-release" feature needs to record stack traces
-	 (of calling functions) for all acquisitions, for eventual later
-	 use during analysis. By default only a single caller is recorded,
-	 because the unwind operation can be very expensive with deeper
-	 stack chains.
-
-	 However a boot parameter, crossrelease_fullstack, was
-	 introduced since sometimes deeper traces are required for full
-	 analysis.  This option turns on the boot parameter.
-
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
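A side note on the Kconfig change above: LOCKDEP_CROSSRELEASE and LOCKDEP_COMPLETIONS are promptless bool symbols, so dropping the two "select" lines from PROVE_LOCKING is what actually switches the feature off everywhere; the later hunk then deletes the now-unreachable symbols. A hedged sketch of the pattern, with hypothetical symbol names:

# Illustration only. A bool symbol without a prompt string cannot be
# set from menuconfig; it only becomes 'y' when another symbol
# selects it.
config EXAMPLE_HIDDEN_FEATURE
	bool
	help
	  Not user-visible; enabled only via "select".

config EXAMPLE_USER_OPTION
	bool "Example user-visible option"
	select EXAMPLE_HIDDEN_FEATURE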