commit 4e6045f1 — authored by Johannes Berg, committed by Linus Torvalds

workqueue: debug flushing deadlocks with lockdep

In the following scenario:

code path 1:
  my_function() -> lock(L1); ...; flush_workqueue(); ...

code path 2:
  run_workqueue() -> my_work() -> ...; lock(L1); ...

you can get a deadlock when my_work() is queued or running
but my_function() has acquired L1 already.

This patch adds a pseudo-lock to each workqueue to make lockdep
warn about this scenario.
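
As an illustration (hypothetical module code, reusing the names from the
scenario above, not part of the original changelog):

    static DEFINE_MUTEX(L1);
    static struct workqueue_struct *wq;	/* created with create_workqueue() */

    /* code path 2: the work function takes L1 */
    static void my_work(struct work_struct *work)
    {
    	mutex_lock(&L1);
    	/* ... */
    	mutex_unlock(&L1);
    }
    static DECLARE_WORK(work, my_work);

    /* code path 1: flushes the workqueue while holding L1 */
    static void my_function(void)
    {
    	mutex_lock(&L1);
    	flush_workqueue(wq);	/* may wait forever for my_work() */
    	mutex_unlock(&L1);
    }

If my_work() is pending when my_function() calls flush_workqueue(), the
flush waits on my_work(), which in turn needs L1, still held by
my_function(). With this patch, lockdep records both dependency
directions and warns even on runs where the deadlock never triggers.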

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: cf7b708c
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -275,6 +275,14 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lockdep_init_map(&(lock)->dep_map, #lock, \
 			 (lock)->dep_map.key, sub)
 
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+	{ .name = (_name), .key = (void *)(_key), }
+
+
 /*
  * Acquire a lock.
  *
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -8,6 +8,7 @@
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/lockdep.h>
 #include <asm/atomic.h>
 
 struct workqueue_struct;
@@ -28,6 +29,9 @@ struct work_struct {
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
 	work_func_t func;
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
@@ -41,10 +45,23 @@ struct execute_work {
 	struct work_struct work;
 };
 
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting _key
+ * here is required, otherwise it could get initialised to the
+ * copy of the lockdep_map!
+ */
+#define __WORK_INIT_LOCKDEP_MAP(n, k) \
+	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
+#else
+#define __WORK_INIT_LOCKDEP_MAP(n, k)
+#endif
+
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_INIT(),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
+	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
 	}
 
 #define __DELAYED_WORK_INITIALIZER(n, f) {			\
@@ -76,12 +93,24 @@ struct execute_work {
  * assignment of the work data initializer allows the compiler
  * to generate better code.
  */
+#ifdef CONFIG_LOCKDEP
+#define INIT_WORK(_work, _func)						\
+	do {								\
+		static struct lock_class_key __key;			\
+									\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+		INIT_LIST_HEAD(&(_work)->entry);			\
+		PREPARE_WORK((_work), (_func));				\
+	} while (0)
+#else
 #define INIT_WORK(_work, _func)						\
 	do {								\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
+#endif
 
 #define INIT_DELAYED_WORK(_work, _func)				\
 	do {							\
@@ -118,9 +147,23 @@ struct execute_work {
 	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 
-extern struct workqueue_struct *__create_workqueue(const char *name,
-						   int singlethread,
-						   int freezeable);
+extern struct workqueue_struct *
+__create_workqueue_key(const char *name, int singlethread,
+		       int freezeable, struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define __create_workqueue(name, singlethread, freezeable)	\
+({								\
+	static struct lock_class_key __key;			\
+								\
+	__create_workqueue_key((name), (singlethread),		\
+			       (freezeable), &__key);		\
+})
+#else
+#define __create_workqueue(name, singlethread, freezeable)	\
+	__create_workqueue_key((name), (singlethread), (freezeable), NULL)
+#endif
+
 #define create_workqueue(name) __create_workqueue((name), 0, 0)
 #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
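
Note the shape of the new __create_workqueue(): the static __key inside
the statement expression gives each call site its own lock_class_key, so
every workqueue created at a distinct place in the code gets a distinct
lockdep class (the same trick the CONFIG_LOCKDEP variant of INIT_WORK
uses for work items). A hypothetical pair of call sites:

    /* Two call sites, two static __key instances, two lockdep classes:
     * flushing one of these queues from inside the other's work items
     * does not produce false positives. */
    struct workqueue_struct *tx_wq = create_workqueue("tx");
    struct workqueue_struct *rx_wq = create_singlethread_workqueue("rx");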
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-	       	struct held_lock *hlock, int chain_head, u64 chain_key)
+		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,6 +32,7 @@
 #include <linux/freezer.h>
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
+#include <linux/lockdep.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -61,6 +62,9 @@ struct workqueue_struct {
 	const char *name;
 	int singlethread;
 	int freezeable;		/* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -250,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
 		work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+		/*
+		 * It is permissible to free the struct work_struct
+		 * from inside the function that is called from it,
+		 * this we need to take into account for lockdep too.
+		 * To avoid bogus "held lock freed" warnings as well
+		 * as problems when looking into work->lockdep_map,
+		 * make a copy and use that here.
+		 */
+		struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
@@ -257,7 +272,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
+		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 		f(work);
+		lock_release(&lockdep_map, 1, _THIS_IP_);
+		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -376,6 +395,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
+	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
 	for_each_cpu_mask(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -446,6 +467,9 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
+	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
 	cwq = get_wq_data(work);
 	if (!cwq)
 		return;
@@ -695,8 +719,10 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	}
 }
 
-struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+						int singlethread,
+						int freezeable,
+						struct lock_class_key *key)
 {
 	struct workqueue_struct *wq;
 	struct cpu_workqueue_struct *cwq;
@@ -713,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
+	lockdep_init_map(&wq->lockdep_map, name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
 	INIT_LIST_HEAD(&wq->list);
@@ -741,7 +768,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
@@ -752,6 +779,9 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
 
+	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
 	flush_cpu_workqueue(cwq);
 	/*
 	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
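
How the pieces fit together (a reading aid derived from the diff above,
not part of the commit): run_workqueue() wraps every work invocation in
acquire/release pairs on both the workqueue's and the work item's
pseudo-locks, while flush_workqueue(), wait_on_work() and
cleanup_workqueue_thread() each do a momentary acquire/release of the
pseudo-lock they are about to wait on. In the changelog's scenario
lockdep therefore records both orderings:

    run_workqueue():			my_function():
      lock_acquire(wq->lockdep_map)	  mutex_lock(&L1)
      f(work)				  flush_workqueue(wq)
        mutex_lock(&L1)		  	    lock_acquire(wq->lockdep_map)

    => wq->lockdep_map before L1	=> L1 before wq->lockdep_map

The two orderings form a cycle, so lockdep prints a possible-deadlock
warning once both paths have run, whether or not they ever race.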