提交 61891923 编写于 作者: Stefan Behrens 提交者: Josef Bacik

Btrfs: handle errors from btrfs_map_bio() everywhere

With the addition of the device replace procedure, it is possible
for btrfs_map_bio(READ) to report an error. This happens when the
specific mirror is requested which is located on the target disk,
and the copy operation has not yet copied this block. Hence the
block cannot be read and this error state is indicated by
returning EIO.
Some background information follows now. A new mirror is added
while the device replace procedure is running.
btrfs_get_num_copies() returns one more, and
btrfs_map_bio(GET_READ_MIRROR) adds one more mirror if a disk
location is involved that was already handled by the device
replace copy operation. The assigned mirror num is the highest
mirror number, e.g. the value 3 in case of RAID1.
If btrfs_map_bio() is invoked with mirror_num == 0 (i.e., select
any mirror), the copy on the target drive is never selected
because that disk shall be able to perform the write requests as
quickly as possible. The parallel execution of read requests would
only slow down the disk copy procedure. Second case is that
btrfs_map_bio() is called with mirror_num > 0. This is done from
the repair code only. In this case, the highest mirror num is
assigned to the target disk, since it is used last. And when this
mirror is not available because the copy procedure has not yet
handled this area, an error is returned. Everywhere in the code
the handling of such errors is added now.
Signed-off-by: Stefan Behrens <sbehrens@giantdisaster.de>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
上级 63a212ab
...@@ -1585,6 +1585,18 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, ...@@ -1585,6 +1585,18 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
ret = btrfs_map_block(state->root->fs_info, READ, ret = btrfs_map_block(state->root->fs_info, READ,
bytenr, &length, &multi, mirror_num); bytenr, &length, &multi, mirror_num);
if (ret) {
block_ctx_out->start = 0;
block_ctx_out->dev_bytenr = 0;
block_ctx_out->len = 0;
block_ctx_out->dev = NULL;
block_ctx_out->datav = NULL;
block_ctx_out->pagev = NULL;
block_ctx_out->mem_to_free = NULL;
return ret;
}
device = multi->stripes[0].dev; device = multi->stripes[0].dev;
block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev); block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
block_ctx_out->dev_bytenr = multi->stripes[0].physical; block_ctx_out->dev_bytenr = multi->stripes[0].physical;
...@@ -1594,8 +1606,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, ...@@ -1594,8 +1606,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
block_ctx_out->pagev = NULL; block_ctx_out->pagev = NULL;
block_ctx_out->mem_to_free = NULL; block_ctx_out->mem_to_free = NULL;
if (0 == ret) kfree(multi);
kfree(multi);
if (NULL == block_ctx_out->dev) { if (NULL == block_ctx_out->dev) {
ret = -ENXIO; ret = -ENXIO;
printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n"); printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
......
...@@ -687,7 +687,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -687,7 +687,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, READ, comp_bio, ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0); mirror_num, 0);
BUG_ON(ret); /* -ENOMEM */ if (ret)
bio_endio(comp_bio, ret);
bio_put(comp_bio); bio_put(comp_bio);
...@@ -712,7 +713,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -712,7 +713,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
} }
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
BUG_ON(ret); /* -ENOMEM */ if (ret)
bio_endio(comp_bio, ret);
bio_put(comp_bio); bio_put(comp_bio);
return 0; return 0;
......
...@@ -852,11 +852,16 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, ...@@ -852,11 +852,16 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
int ret;
/* /*
* when we're called for a write, we're already in the async * when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio * submission context. Just jump into btrfs_map_bio
*/ */
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
if (ret)
bio_endio(bio, ret);
return ret;
} }
static int check_async_write(struct inode *inode, unsigned long bio_flags) static int check_async_write(struct inode *inode, unsigned long bio_flags)
...@@ -878,7 +883,6 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, ...@@ -878,7 +883,6 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int ret; int ret;
if (!(rw & REQ_WRITE)) { if (!(rw & REQ_WRITE)) {
/* /*
* called for a read, do the setup so that checksum validation * called for a read, do the setup so that checksum validation
* can happen in the async kernel threads * can happen in the async kernel threads
...@@ -886,26 +890,32 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, ...@@ -886,26 +890,32 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
bio, 1); bio, 1);
if (ret) if (ret)
return ret; goto out_w_error;
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0); mirror_num, 0);
} else if (!async) { } else if (!async) {
ret = btree_csum_one_bio(bio); ret = btree_csum_one_bio(bio);
if (ret) if (ret)
return ret; goto out_w_error;
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0); mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num, 0,
bio_offset,
__btree_submit_bio_start,
__btree_submit_bio_done);
} }
/* if (ret) {
* kthread helpers are used to submit writes so that checksumming out_w_error:
* can happen in parallel across all CPUs bio_endio(bio, ret);
*/ }
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, return ret;
inode, rw, bio, mirror_num, 0,
bio_offset,
__btree_submit_bio_start,
__btree_submit_bio_done);
} }
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
......
...@@ -2462,10 +2462,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, ...@@ -2462,10 +2462,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
return bio; return bio;
} }
/*
* Since writes are async, they will only return -ENOMEM.
* Reads can return the full range of I/O error conditions.
*/
static int __must_check submit_one_bio(int rw, struct bio *bio, static int __must_check submit_one_bio(int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags) int mirror_num, unsigned long bio_flags)
{ {
......
...@@ -1602,7 +1602,12 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, ...@@ -1602,7 +1602,12 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
u64 bio_offset) u64 bio_offset)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
return btrfs_map_bio(root, rw, bio, mirror_num, 1); int ret;
ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
if (ret)
bio_endio(bio, ret);
return ret;
} }
/* /*
...@@ -1626,15 +1631,17 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, ...@@ -1626,15 +1631,17 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (!(rw & REQ_WRITE)) { if (!(rw & REQ_WRITE)) {
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret) if (ret)
return ret; goto out;
if (bio_flags & EXTENT_BIO_COMPRESSED) { if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio, ret = btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags); mirror_num,
bio_flags);
goto out;
} else if (!skip_sum) { } else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
if (ret) if (ret)
return ret; goto out;
} }
goto mapit; goto mapit;
} else if (!skip_sum) { } else if (!skip_sum) {
...@@ -1642,15 +1649,21 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, ...@@ -1642,15 +1649,21 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit; goto mapit;
/* we're doing a write, do the async checksumming */ /* we're doing a write, do the async checksumming */
return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num, inode, rw, bio, mirror_num,
bio_flags, bio_offset, bio_flags, bio_offset,
__btrfs_submit_bio_start, __btrfs_submit_bio_start,
__btrfs_submit_bio_done); __btrfs_submit_bio_done);
goto out;
} }
mapit: mapit:
return btrfs_map_bio(root, rw, bio, mirror_num, 0); ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
out:
if (ret < 0)
bio_endio(bio, ret);
return ret;
} }
/* /*
......
...@@ -4435,7 +4435,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, ...@@ -4435,7 +4435,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
mirror_num); mirror_num);
if (ret) /* -ENOMEM */ if (ret)
return ret; return ret;
total_devs = bbio->num_stripes; total_devs = bbio->num_stripes;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册