Commit ee863954 authored by David Sterba

btrfs: comment the rest of implicit barriers before waitqueue_active

There are atomic operations that imply the barrier for waitqueue_active and
are mixed into an if-condition; add comments documenting these implicit barriers.
Signed-off-by: David Sterba <dsterba@suse.com>
Parent 779adf0f
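For context, the pattern being documented is shown below in a minimal, self-contained sketch. It is not part of this patch, and the names (foo, foo_finish_one, the pending counter and wait queue) are hypothetical. The point of the added comments is that a value-returning atomic such as atomic_dec_return() already implies the full memory barrier that must sit between the state change and the lockless waitqueue_active() check, so no explicit smp_mb() is needed on the waker side.

#include <linux/atomic.h>
#include <linux/wait.h>

/* Hypothetical example structure, not btrfs code. */
struct foo {
	atomic_t pending;		/* outstanding work items */
	wait_queue_head_t wait;		/* tasks waiting for pending to drop */
};

static void foo_finish_one(struct foo *f, int limit)
{
	/*
	 * atomic_dec_return() implies a full barrier, which orders the
	 * decrement against the lockless waitqueue_active() check below.
	 */
	if (atomic_dec_return(&f->pending) < limit &&
	    waitqueue_active(&f->wait))
		wake_up(&f->wait);
}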
@@ -463,6 +463,10 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
 	int seq = atomic_inc_return(&delayed_root->items_seq);
+
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if ((atomic_dec_return(&delayed_root->items) <
 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 	    waitqueue_active(&delayed_root->wait))
...
@@ -802,6 +802,9 @@ static void run_one_async_done(struct btrfs_work *work)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
 	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
...
@@ -1096,6 +1096,9 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
 		PAGE_CACHE_SHIFT;
+	/*
+	 * atomic_sub_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
 	    5 * 1024 * 1024 &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
...
@@ -79,6 +79,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		write_lock(&eb->lock);
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_inc(&eb->spinning_writers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_writers) &&
 		    waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
@@ -86,6 +89,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 		read_lock(&eb->lock);
 		atomic_inc(&eb->spinning_readers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_readers) &&
 		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
@@ -229,6 +235,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+	/*
+	 * atomic_dec_and_test implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_and_test(&eb->blocking_readers) &&
 	    waitqueue_active(&eb->read_lock_wq))
 		wake_up(&eb->read_lock_wq);
...
@@ -345,6 +345,9 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
+		/*
+		 * atomic_dec_return implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
 		    waitqueue_active(&fs_info->async_submit_wait))
 			wake_up(&fs_info->async_submit_wait);
...
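The waker-side barriers documented above pair with the sleeping side: a waiter must already be on the queue before it re-checks its condition, which wait_event() guarantees. A hedged sketch of such a waiter, reusing the hypothetical foo structure from the earlier example:

static void foo_wait_until_below(struct foo *f, int limit)
{
	/*
	 * wait_event() adds the task to f->wait and only then re-evaluates
	 * the condition, so a wake_up() that races with this call is not
	 * lost even though the waker checks waitqueue_active() locklessly.
	 */
	wait_event(f->wait, atomic_read(&f->pending) < limit);
}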