Commit 6e74057c authored by Chris Mason

Btrfs: Fix async thread shutdown race

It was possible for an async worker thread to be selected to
receive a new work item, but exit before the work item was
actually placed into that thread's work list.

This commit fixes the race by incrementing the num_pending
counter earlier, and making sure to check the number of pending
work items before a thread exits.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent 627e421a
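
The ordering the patch enforces can be sketched in user space. The snippet below is a minimal, hypothetical illustration using POSIX threads and C11 atomics with simplified types; the names worker, num_pending, find_worker, try_shutdown and queue_work only mirror the btrfs helpers. The point it demonstrates: the pending counter is raised as soon as a worker is selected, so the shutdown check cannot succeed in the window between selection and queueing.

/*
 * Minimal sketch of the handoff race and its fix; simplified, not the
 * kernel code. Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct work { struct work *next; };

struct worker {
	pthread_mutex_t lock;   /* protects the pending list */
	struct work *pending;   /* queued work items */
	atomic_int num_pending; /* counts work that is queued *or* still in
				 * flight between find_worker() and queue_work() */
};

/* A worker may only exit when nothing is queued and nothing is on its way;
 * the num_pending check is what closes the race. */
static bool try_shutdown(struct worker *w)
{
	bool freeit;

	pthread_mutex_lock(&w->lock);
	freeit = (w->pending == NULL && atomic_load(&w->num_pending) == 0);
	pthread_mutex_unlock(&w->lock);
	return freeit;
}

/* Selecting a worker raises num_pending immediately, so the worker cannot
 * pass the try_shutdown() check while the caller still holds an item for it. */
static struct worker *find_worker(struct worker *w)
{
	atomic_fetch_add(&w->num_pending, 1);
	return w;
}

static void queue_work(struct worker *w, struct work *item)
{
	pthread_mutex_lock(&w->lock);
	item->next = w->pending;
	w->pending = item;
	/* num_pending was already raised in find_worker() */
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	struct worker w = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct work item = { 0 };
	struct worker *target = find_worker(&w);

	/* Even if the worker attempts to shut down right here, between
	 * selection and queueing, it must stay alive: num_pending is 1. */
	printf("shutdown allowed before queueing? %s\n",
	       try_shutdown(&w) ? "yes (the old bug)" : "no (race closed)");
	queue_work(target, &item);
	return 0;
}

In the actual patch the same ordering comes from moving atomic_inc(&worker->num_pending) into find_worker(), while workers->lock is still held, and from adding the num_pending == 0 test to try_worker_shutdown().
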
@@ -191,7 +191,8 @@ static int try_worker_shutdown(struct btrfs_worker_thread *worker)
 	    !worker->working &&
 	    !list_empty(&worker->worker_list) &&
 	    list_empty(&worker->prio_pending) &&
-	    list_empty(&worker->pending)) {
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
 		freeit = 1;
 		list_del_init(&worker->worker_list);
 		worker->workers->num_workers--;
@@ -485,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;
 
 	if (worker->sequence % workers->idle_thresh == 0)
@@ -521,8 +521,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 			goto again;
 		}
 	}
-	spin_unlock_irqrestore(&workers->lock, flags);
-	return worker;
+	goto found;
 
 fallback:
 	fallback = NULL;
@@ -537,6 +536,12 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	BUG_ON(!fallback);
 	worker = list_entry(fallback,
 		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
 	spin_unlock_irqrestore(&workers->lock, flags);
 	return worker;
 }
@@ -569,7 +574,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		spin_lock(&worker->workers->lock);
 		worker->idle = 0;
 		list_move_tail(&worker->worker_list,
-			       &worker->workers->worker_list);
+			      &worker->workers->worker_list);
 		spin_unlock(&worker->workers->lock);
 	}
 	if (!worker->working) {
@@ -627,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
 	/*