Commit 6ec9765d authored by Qu Wenruo, committed by David Sterba

btrfs: introduce compressed_bio::pending_sectors to trace compressed bio

For btrfs_submit_compressed_read() and btrfs_submit_compressed_write(),
we have a pretty weird dance around compressed_bio::pending_bios:

  btrfs_submit_compressed_read/write()
  {
	cb = kmalloc()
	refcount_set(&cb->pending_bios, 0);
	bio = btrfs_alloc_bio();

	/* NOTE here, we haven't yet submitted any bio */
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		if (submit) {
			/* Here we submit bio, but we always have one
			 * extra pending_bios */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_map_bio();
		}
	}

	/* Submit the last bio */
	ret = btrfs_map_bio();
  }

There are two reasons why we do this:

- compressed_bio::pending_bios is a refcount
  Thus once it drops to 0, it cannot be increased again.

- To ensure the compressed_bio is not freed by an already submitted bio
  If a submitted bio finished before the next bio was submitted, its endio
  could drop the count to 0 and free the compressed_bio while we are still
  setting it up (see the timeline sketch below).
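
To make the second point concrete, here is a rough, hypothetical timeline
of what could go wrong without the extra reference held by the submitter
(not taken verbatim from the code):

  refcount_set(&cb->pending_bios, 1);	/* accounts only for bio1 */
  ret = btrfs_map_bio(bio1);		/* bio1 completes immediately */
	end_compressed_bio_*()
		refcount_dec_and_test()	/* drops to 0, frees cb */
  bio2 = btrfs_alloc_bio();		/* cb already freed: use-after-free */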

But the above code is sometimes confusing, and we can do it better by
introducing a new member, compressed_bio::pending_sectors.

Now compressed_bio::pending_sectors tracks how many sectors are still
under IO or not yet submitted.

If pending_sectors reaches 0, we are definitely finishing the last bio of
the compressed_bio, and it is OK to release it (the matching endio side is
sketched after the workflow below).

Now the workflow looks like this:

  btrfs_submit_compressed_read/write()
  {
	cb = kmalloc()
	atomic_set(&cb->pending_bios, 0);
	refcount_set(&cb->pending_sectors,
		     compressed_len >> sectorsize_bits);
	bio = btrfs_alloc_bio();

	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		if (submit) {
			refcount_inc(&cb->pending_bios);
			ret = btrfs_map_bio();
		}
	}

	/* Submit the last bio */
	refcount_inc(&cb->pending_bios);
	ret = btrfs_map_bio();
  }
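
The endio side becomes the mirror of this submission sketch. Simplified
from the new helper dec_and_test_compressed_bio() added below, each
finishing bio subtracts the number of sectors it covers, and only the bio
that brings pending_sectors down to 0 may free the compressed_bio:

  end_compressed_bio_read/write()
  {
	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;
	/* pending_sectors reached 0, we're the last bio, free cb */
  }

  dec_and_test_compressed_bio()
  {
	/*
	 * bi_iter.bi_size is already consumed at endio time, so sum the
	 * bvec lengths to get the real bio size.
	 */
	bio_for_each_segment_all(bvec, bio, iter_all)
		bi_size += bvec->bv_len;

	last_io = refcount_sub_and_test(bi_size >> sectorsize_bits,
					&cb->pending_sectors);
	atomic_dec(&cb->pending_bios);
	return last_io;
  }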

For now we still need pending_bios for error handling, but we will remove
it eventually once the errors are handled properly.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent 6a404910
@@ -193,6 +193,38 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 	return 0;
 }
 
+/*
+ * Reduce bio and io accounting for a compressed_bio with its corresponding bio.
+ *
+ * Return true if there is no pending bio nor io.
+ * Return false otherwise.
+ */
+static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
+	unsigned int bi_size = 0;
+	bool last_io = false;
+	struct bio_vec *bvec;
+	struct bvec_iter_all iter_all;
+
+	/*
+	 * At endio time, bi_iter.bi_size doesn't represent the real bio size.
+	 * Thus here we have to iterate through all segments to grab correct
+	 * bio size.
+	 */
+	bio_for_each_segment_all(bvec, bio, iter_all)
+		bi_size += bvec->bv_len;
+
+	if (bio->bi_status)
+		cb->errors = 1;
+
+	ASSERT(bi_size && bi_size <= cb->compressed_len);
+	last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
+					&cb->pending_sectors);
+	atomic_dec(&cb->pending_bios);
+	return last_io;
+}
+
 /* when we finish reading compressed pages from the disk, we
  * decompress them and then run the bio end_io routines on the
  * decompressed pages (in the inode address space).
@@ -212,13 +244,7 @@ static void end_compressed_bio_read(struct bio *bio)
 	unsigned int mirror = btrfs_bio(bio)->mirror_num;
 	int ret = 0;
 
-	if (bio->bi_status)
-		cb->errors = 1;
-
-	/* if there are more bios still pending for this compressed
-	 * extent, just exit
-	 */
-	if (!refcount_dec_and_test(&cb->pending_bios))
+	if (!dec_and_test_compressed_bio(cb, bio))
 		goto out;
 
 	/*
@@ -336,13 +362,7 @@ static void end_compressed_bio_write(struct bio *bio)
 	struct page *page;
 	unsigned int index;
 
-	if (bio->bi_status)
-		cb->errors = 1;
-
-	/* if there are more bios still pending for this compressed
-	 * extent, just exit
-	 */
-	if (!refcount_dec_and_test(&cb->pending_bios))
+	if (!dec_and_test_compressed_bio(cb, bio))
 		goto out;
 
 	/* ok, we're the last bio for this extent, step one is to
@@ -408,7 +428,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 	if (!cb)
 		return BLK_STS_RESOURCE;
-	refcount_set(&cb->pending_bios, 0);
+	atomic_set(&cb->pending_bios, 0);
+	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
 	cb->errors = 0;
 	cb->inode = &inode->vfs_inode;
 	cb->start = start;
@@ -442,7 +463,6 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 		bio->bi_opf |= REQ_CGROUP_PUNT;
 		kthread_associate_blkcg(blkcg_css);
 	}
-	refcount_set(&cb->pending_bios, 1);
 
 	/* create and submit bios for the compressed pages */
 	bytes_left = compressed_len;
@@ -470,13 +490,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 		page->mapping = NULL;
 		if (submit || len < PAGE_SIZE) {
-			/*
-			 * inc the count before we submit the bio so
-			 * we know the end IO handler won't happen before
-			 * we inc the count. Otherwise, the cb might get
-			 * freed before we're done setting it up
-			 */
-			refcount_inc(&cb->pending_bios);
+			atomic_inc(&cb->pending_bios);
 			ret = btrfs_bio_wq_end_io(fs_info, bio,
 						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */
@@ -515,6 +529,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 		cond_resched();
 	}
 
+	atomic_inc(&cb->pending_bios);
 	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */
@@ -734,7 +749,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	if (!cb)
 		goto out;
 
-	refcount_set(&cb->pending_bios, 0);
+	atomic_set(&cb->pending_bios, 0);
+	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
 	cb->errors = 0;
 	cb->inode = inode;
 	cb->mirror_num = mirror_num;
@@ -779,7 +795,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	comp_bio->bi_opf = REQ_OP_READ;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
-	refcount_set(&cb->pending_bios, 1);
 
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 		u32 pg_len = PAGE_SIZE;
@@ -808,18 +823,11 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
 			unsigned int nr_sectors;
 
+			atomic_inc(&cb->pending_bios);
 			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */
 
-			/*
-			 * inc the count before we submit the bio so
-			 * we know the end IO handler won't happen before
-			 * we inc the count. Otherwise, the cb might get
-			 * freed before we're done setting it up
-			 */
-			refcount_inc(&cb->pending_bios);
-
 			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 			BUG_ON(ret); /* -ENOMEM */
@@ -844,6 +852,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		cur_disk_byte += pg_len;
 	}
 
+	atomic_inc(&cb->pending_bios);
 	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */
......
@@ -28,8 +28,11 @@ struct btrfs_inode;
 #define BTRFS_ZLIB_DEFAULT_LEVEL		3
 
 struct compressed_bio {
-	/* number of bios pending for this compressed extent */
-	refcount_t pending_bios;
+	/* Number of bios pending for this compressed extent */
+	atomic_t pending_bios;
+
+	/* Number of sectors with unfinished IO (unsubmitted or unfinished) */
+	refcount_t pending_sectors;
 
 	/* Number of compressed pages in the array */
 	unsigned int nr_pages;
......