Commit 4c664611 authored by Qu Wenruo, committed by David Sterba

btrfs: rename btrfs_bio to btrfs_io_context

The structure btrfs_bio is used by two different call sites:

- bio->bi_private for mirror based profiles
  For those profiles (SINGLE/DUP/RAID1*/RAID10), this structure records
  how many mirrors are still pending, and saves the original endio
  function of the bio.

- RAID56 code
  In that case, RAID56 only utilizes the stripes info, and no longer uses
  it to track the pending mirrors.

So btrfs_bio is not always bound to a bio, and contains more info about the
IO context; renaming it makes the naming less confusing.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent dc287224
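For orientation, here is the shape of the rename as it shows up in the header changes further down. This is an abridged sketch only: most fields and comments of the real declarations are omitted, and the "was:" comments are added here for comparison.

/* was: struct btrfs_bio_stripe */
struct btrfs_io_stripe {
        struct btrfs_device *dev;
        u64 physical;
        u64 length;     /* only used for discard mappings */
};

/* was: struct btrfs_bio; now an IO context rather than a bio wrapper */
struct btrfs_io_context {
        refcount_t refs;                /* lifetime via btrfs_get_bioc()/btrfs_put_bioc() */
        atomic_t stripes_pending;       /* mirrors still pending for SINGLE/DUP/RAID1/RAID10 */
        struct btrfs_fs_info *fs_info;
        /* ... map_type, mirror_num, num_stripes, max_errors, num_tgtdevs,
         *     tgtdev_map and other fields omitted in this sketch ... */
        u64 *raid_map;                  /* RAID56 full stripe logical bytenrs */
        struct btrfs_io_stripe stripes[];
};

/* was: btrfs_get_bbio() / btrfs_put_bbio() */
void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);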
@@ -1455,7 +1455,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 struct btrfs_fs_info *fs_info = state->fs_info;
 int ret;
 u64 length;
-struct btrfs_bio *multi = NULL;
+struct btrfs_io_context *multi = NULL;
 struct btrfs_device *device;
 length = len;
...
@@ -1266,7 +1266,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 return ret;
 }
-static int do_discard_extent(struct btrfs_bio_stripe *stripe, u64 *bytes)
+static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
 {
 struct btrfs_device *dev = stripe->dev;
 struct btrfs_fs_info *fs_info = dev->fs_info;
@@ -1313,22 +1313,21 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 u64 discarded_bytes = 0;
 u64 end = bytenr + num_bytes;
 u64 cur = bytenr;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 /*
-* Avoid races with device replace and make sure our bbio has devices
+* Avoid races with device replace and make sure our bioc has devices
 * associated to its stripes that don't go away while we are discarding.
 */
 btrfs_bio_counter_inc_blocked(fs_info);
 while (cur < end) {
-struct btrfs_bio_stripe *stripe;
+struct btrfs_io_stripe *stripe;
 int i;
 num_bytes = end - cur;
 /* Tell the block device(s) that the sectors can be discarded */
 ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
-&num_bytes, &bbio, 0);
+&num_bytes, &bioc, 0);
 /*
 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
@@ -1337,8 +1336,8 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 if (ret < 0)
 goto out;
-stripe = bbio->stripes;
-for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+stripe = bioc->stripes;
+for (i = 0; i < bioc->num_stripes; i++, stripe++) {
 u64 bytes;
 struct btrfs_device *device = stripe->dev;
@@ -1361,7 +1360,7 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 * And since there are two loops, explicitly
 * go to out to avoid confusion.
 */
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 goto out;
 }
@@ -1372,7 +1371,7 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 */
 ret = 0;
 }
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 cur += num_bytes;
 }
 out:
...
@@ -2290,7 +2290,7 @@ static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 struct btrfs_device *dev;
 u64 map_length = 0;
 u64 sector;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 int ret;
 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
@@ -2304,7 +2304,7 @@ static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 map_length = length;
 /*
-* Avoid races with device replace and make sure our bbio has devices
+* Avoid races with device replace and make sure our bioc has devices
 * associated to its stripes that don't go away while we are doing the
 * read repair operation.
 */
@@ -2317,28 +2317,28 @@ static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
 * stripe's dev and sector.
 */
 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
-&map_length, &bbio, 0);
+&map_length, &bioc, 0);
 if (ret) {
 btrfs_bio_counter_dec(fs_info);
 bio_put(bio);
 return -EIO;
 }
-ASSERT(bbio->mirror_num == 1);
+ASSERT(bioc->mirror_num == 1);
 } else {
 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
-&map_length, &bbio, mirror_num);
+&map_length, &bioc, mirror_num);
 if (ret) {
 btrfs_bio_counter_dec(fs_info);
 bio_put(bio);
 return -EIO;
 }
-BUG_ON(mirror_num != bbio->mirror_num);
+BUG_ON(mirror_num != bioc->mirror_num);
 }
-sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
+sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
 bio->bi_iter.bi_sector = sector;
-dev = bbio->stripes[bbio->mirror_num - 1].dev;
-btrfs_put_bbio(bbio);
+dev = bioc->stripes[bioc->mirror_num - 1].dev;
+btrfs_put_bioc(bioc);
 if (!dev || !dev->bdev ||
 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
 btrfs_bio_counter_dec(fs_info);
...
@@ -360,7 +360,7 @@ static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
 int i;
 for (i = 0; i < map->num_stripes; i++) {
-struct btrfs_bio_stripe *stripe = &map->stripes[i];
+struct btrfs_io_stripe *stripe = &map->stripes[i];
 struct btrfs_device *device = stripe->dev;
 set_extent_bits_nowait(&device->alloc_state, stripe->physical,
@@ -375,7 +375,7 @@ static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
 int i;
 for (i = 0; i < map->num_stripes; i++) {
-struct btrfs_bio_stripe *stripe = &map->stripes[i];
+struct btrfs_io_stripe *stripe = &map->stripes[i];
 struct btrfs_device *device = stripe->dev;
 __clear_extent_bit(&device->alloc_state, stripe->physical,
...
@@ -61,7 +61,7 @@ enum btrfs_rbio_ops {
 struct btrfs_raid_bio {
 struct btrfs_fs_info *fs_info;
-struct btrfs_bio *bbio;
+struct btrfs_io_context *bioc;
 /* while we're doing rmw on a stripe
 * we put it into a hash table so we can
@@ -271,7 +271,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 */
 static int rbio_bucket(struct btrfs_raid_bio *rbio)
 {
-u64 num = rbio->bbio->raid_map[0];
+u64 num = rbio->bioc->raid_map[0];
 /*
 * we shift down quite a bit. We're using byte
@@ -559,8 +559,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
 test_bit(RBIO_CACHE_BIT, &cur->flags))
 return 0;
-if (last->bbio->raid_map[0] !=
-cur->bbio->raid_map[0])
+if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
 return 0;
 /* we can't merge with different operations */
@@ -673,7 +672,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 spin_lock_irqsave(&h->lock, flags);
 list_for_each_entry(cur, &h->hash_list, hash_list) {
-if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
 continue;
 spin_lock(&cur->bio_list_lock);
@@ -838,7 +837,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 }
 }
-btrfs_put_bbio(rbio->bbio);
+btrfs_put_bioc(rbio->bioc);
 kfree(rbio);
 }
@@ -906,7 +905,7 @@ static void raid_write_end_io(struct bio *bio)
 /* OK, we have read all the stripes we need to. */
 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
-0 : rbio->bbio->max_errors;
+0 : rbio->bioc->max_errors;
 if (atomic_read(&rbio->error) > max_errors)
 err = BLK_STS_IOERR;
@@ -961,12 +960,12 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 * this does not allocate any pages for rbio->pages.
 */
 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
-struct btrfs_bio *bbio,
+struct btrfs_io_context *bioc,
 u64 stripe_len)
 {
 struct btrfs_raid_bio *rbio;
 int nr_data = 0;
-int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 void *p;
@@ -987,7 +986,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 spin_lock_init(&rbio->bio_list_lock);
 INIT_LIST_HEAD(&rbio->stripe_cache);
 INIT_LIST_HEAD(&rbio->hash_list);
-rbio->bbio = bbio;
+rbio->bioc = bioc;
 rbio->fs_info = fs_info;
 rbio->stripe_len = stripe_len;
 rbio->nr_pages = num_pages;
@@ -1015,9 +1014,9 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
 #undef CONSUME_ALLOC
-if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
 nr_data = real_stripes - 1;
-else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
+else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
 nr_data = real_stripes - 2;
 else
 BUG();
@@ -1077,10 +1076,10 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 struct bio *last = bio_list->tail;
 int ret;
 struct bio *bio;
-struct btrfs_bio_stripe *stripe;
+struct btrfs_io_stripe *stripe;
 u64 disk_start;
-stripe = &rbio->bbio->stripes[stripe_nr];
+stripe = &rbio->bioc->stripes[stripe_nr];
 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
 /* if the device is missing, just fail this stripe */
@@ -1155,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 int i = 0;
 start = bio->bi_iter.bi_sector << 9;
-stripe_offset = start - rbio->bbio->raid_map[0];
+stripe_offset = start - rbio->bioc->raid_map[0];
 page_index = stripe_offset >> PAGE_SHIFT;
 if (bio_flagged(bio, BIO_CLONED))
@@ -1179,7 +1178,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 */
 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 {
-struct btrfs_bio *bbio = rbio->bbio;
+struct btrfs_io_context *bioc = rbio->bioc;
 void **pointers = rbio->finish_pointers;
 int nr_data = rbio->nr_data;
 int stripe;
@@ -1284,11 +1283,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 }
 }
-if (likely(!bbio->num_tgtdevs))
+if (likely(!bioc->num_tgtdevs))
 goto write_data;
 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
-if (!bbio->tgtdev_map[stripe])
+if (!bioc->tgtdev_map[stripe])
 continue;
 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
@@ -1302,7 +1301,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
 }
 ret = rbio_add_io_page(rbio, &bio_list, page,
-rbio->bbio->tgtdev_map[stripe],
+rbio->bioc->tgtdev_map[stripe],
 pagenr, rbio->stripe_len);
 if (ret)
 goto cleanup;
@@ -1339,12 +1338,12 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
 {
 u64 physical = bio->bi_iter.bi_sector;
 int i;
-struct btrfs_bio_stripe *stripe;
+struct btrfs_io_stripe *stripe;
 physical <<= 9;
-for (i = 0; i < rbio->bbio->num_stripes; i++) {
-stripe = &rbio->bbio->stripes[i];
+for (i = 0; i < rbio->bioc->num_stripes; i++) {
+stripe = &rbio->bioc->stripes[i];
 if (in_range(physical, stripe->physical, rbio->stripe_len) &&
 stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
 return i;
@@ -1365,7 +1364,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
 int i;
 for (i = 0; i < rbio->nr_data; i++) {
-u64 stripe_start = rbio->bbio->raid_map[i];
+u64 stripe_start = rbio->bioc->raid_map[i];
 if (in_range(logical, stripe_start, rbio->stripe_len))
 return i;
@@ -1456,7 +1455,7 @@ static void raid_rmw_end_io(struct bio *bio)
 if (!atomic_dec_and_test(&rbio->stripes_pending))
 return;
-if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
 goto cleanup;
 /*
@@ -1538,8 +1537,8 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
 }
 /*
-* the bbio may be freed once we submit the last bio. Make sure
-* not to touch it after that
+* The bioc may be freed once we submit the last bio. Make sure not to
+* touch it after that.
 */
 atomic_set(&rbio->stripes_pending, bios_to_read);
 while ((bio = bio_list_pop(&bio_list))) {
@@ -1720,16 +1719,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
 * our main entry point for writes from the rest of the FS.
 */
 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len)
+struct btrfs_io_context *bioc, u64 stripe_len)
 {
 struct btrfs_raid_bio *rbio;
 struct btrfs_plug_cb *plug = NULL;
 struct blk_plug_cb *cb;
 int ret;
-rbio = alloc_rbio(fs_info, bbio, stripe_len);
+rbio = alloc_rbio(fs_info, bioc, stripe_len);
 if (IS_ERR(rbio)) {
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 return PTR_ERR(rbio);
 }
 bio_list_add(&rbio->bio_list, bio);
@@ -1842,7 +1841,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 }
 /* all raid6 handling here */
-if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
+if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
 /*
 * single failure, rebuild from parity raid5
 * style
@@ -1874,8 +1873,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 * here due to a crc mismatch and we can't give them the
 * data they want
 */
-if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
-if (rbio->bbio->raid_map[faila] ==
+if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
+if (rbio->bioc->raid_map[faila] ==
 RAID5_P_STRIPE) {
 err = BLK_STS_IOERR;
 goto cleanup;
@@ -1887,7 +1886,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 goto pstripe;
 }
-if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
+if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
 raid6_datap_recov(rbio->real_stripes,
 PAGE_SIZE, faila, pointers);
 } else {
@@ -2006,7 +2005,7 @@ static void raid_recover_end_io(struct bio *bio)
 if (!atomic_dec_and_test(&rbio->stripes_pending))
 return;
-if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
 rbio_orig_end_io(rbio, BLK_STS_IOERR);
 else
 __raid_recover_end_io(rbio);
@@ -2074,7 +2073,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 * were up to date, or we might have no bios to read because
 * the devices were gone.
 */
-if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
+if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
 __raid_recover_end_io(rbio);
 return 0;
 } else {
@@ -2083,8 +2082,8 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 }
 /*
-* the bbio may be freed once we submit the last bio. Make sure
-* not to touch it after that
+* The bioc may be freed once we submit the last bio. Make sure not to
+* touch it after that.
 */
 atomic_set(&rbio->stripes_pending, bios_to_read);
 while ((bio = bio_list_pop(&bio_list))) {
@@ -2117,21 +2116,21 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 * of the drive.
 */
 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len,
+struct btrfs_io_context *bioc, u64 stripe_len,
 int mirror_num, int generic_io)
 {
 struct btrfs_raid_bio *rbio;
 int ret;
 if (generic_io) {
-ASSERT(bbio->mirror_num == mirror_num);
+ASSERT(bioc->mirror_num == mirror_num);
 btrfs_io_bio(bio)->mirror_num = mirror_num;
 }
-rbio = alloc_rbio(fs_info, bbio, stripe_len);
+rbio = alloc_rbio(fs_info, bioc, stripe_len);
 if (IS_ERR(rbio)) {
 if (generic_io)
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 return PTR_ERR(rbio);
 }
@@ -2142,11 +2141,11 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
 rbio->faila = find_logical_bio_stripe(rbio, bio);
 if (rbio->faila == -1) {
 btrfs_warn(fs_info,
-"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
+"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
 __func__, bio->bi_iter.bi_sector << 9,
-(u64)bio->bi_iter.bi_size, bbio->map_type);
+(u64)bio->bi_iter.bi_size, bioc->map_type);
 if (generic_io)
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 kfree(rbio);
 return -EIO;
 }
@@ -2155,7 +2154,7 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
 btrfs_bio_counter_inc_noblocked(fs_info);
 rbio->generic_bio_cnt = 1;
 } else {
-btrfs_get_bbio(bbio);
+btrfs_get_bioc(bioc);
 }
 /*
@@ -2214,7 +2213,7 @@ static void read_rebuild_work(struct btrfs_work *work)
 /*
 * The following code is used to scrub/replace the parity stripe
 *
-* Caller must have already increased bio_counter for getting @bbio.
+* Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need make sure all the pages that add into the scrub/replace
 * raid bio are correct and not be changed during the scrub/replace. That
@@ -2223,14 +2222,14 @@ static void read_rebuild_work(struct btrfs_work *work)
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len,
+struct btrfs_io_context *bioc, u64 stripe_len,
 struct btrfs_device *scrub_dev,
 unsigned long *dbitmap, int stripe_nsectors)
 {
 struct btrfs_raid_bio *rbio;
 int i;
-rbio = alloc_rbio(fs_info, bbio, stripe_len);
+rbio = alloc_rbio(fs_info, bioc, stripe_len);
 if (IS_ERR(rbio))
 return NULL;
 bio_list_add(&rbio->bio_list, bio);
@@ -2242,12 +2241,12 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
 /*
-* After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
+* After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
 * to the end position, so this search can start from the first parity
 * stripe.
 */
 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
-if (bbio->stripes[i].dev == scrub_dev) {
+if (bioc->stripes[i].dev == scrub_dev) {
 rbio->scrubp = i;
 break;
 }
@@ -2260,7 +2259,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
 /*
-* We have already increased bio_counter when getting bbio, record it
+* We have already increased bio_counter when getting bioc, record it
 * so we can free it at rbio_orig_end_io().
 */
 rbio->generic_bio_cnt = 1;
@@ -2275,10 +2274,10 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
 int stripe_offset;
 int index;
-ASSERT(logical >= rbio->bbio->raid_map[0]);
-ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
+ASSERT(logical >= rbio->bioc->raid_map[0]);
+ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
 rbio->stripe_len * rbio->nr_data);
-stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
+stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
 index = stripe_offset >> PAGE_SHIFT;
 rbio->bio_pages[index] = page;
 }
@@ -2312,7 +2311,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 int need_check)
 {
-struct btrfs_bio *bbio = rbio->bbio;
+struct btrfs_io_context *bioc = rbio->bioc;
 void **pointers = rbio->finish_pointers;
 unsigned long *pbitmap = rbio->finish_pbitmap;
 int nr_data = rbio->nr_data;
@@ -2335,7 +2334,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 else
 BUG();
-if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
+if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
 is_replace = 1;
 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
 }
@@ -2435,7 +2434,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 ret = rbio_add_io_page(rbio, &bio_list, page,
-bbio->tgtdev_map[rbio->scrubp],
+bioc->tgtdev_map[rbio->scrubp],
 pagenr, rbio->stripe_len);
 if (ret)
 goto cleanup;
@@ -2483,7 +2482,7 @@ static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
 */
 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 {
-if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
 goto cleanup;
 if (rbio->faila >= 0 || rbio->failb >= 0) {
@@ -2504,7 +2503,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 * the data, so the capability of the repair is declined.
 * (In the case of RAID5, we can not repair anything)
 */
-if (dfail > rbio->bbio->max_errors - 1)
+if (dfail > rbio->bioc->max_errors - 1)
 goto cleanup;
 /*
@@ -2625,8 +2624,8 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 }
 /*
-* the bbio may be freed once we submit the last bio. Make sure
-* not to touch it after that
+* The bioc may be freed once we submit the last bio. Make sure not to
+* touch it after that.
 */
 atomic_set(&rbio->stripes_pending, bios_to_read);
 while ((bio = bio_list_pop(&bio_list))) {
@@ -2671,11 +2670,11 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
 struct btrfs_raid_bio *
 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 length)
+struct btrfs_io_context *bioc, u64 length)
 {
 struct btrfs_raid_bio *rbio;
-rbio = alloc_rbio(fs_info, bbio, length);
+rbio = alloc_rbio(fs_info, bioc, length);
 if (IS_ERR(rbio))
 return NULL;
@@ -2695,7 +2694,7 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 }
 /*
-* When we get bbio, we have already increased bio_counter, record it
+* When we get bioc, we have already increased bio_counter, record it
 * so we can free it at rbio_orig_end_io()
 */
 rbio->generic_bio_cnt = 1;
...
@@ -31,24 +31,24 @@ struct btrfs_raid_bio;
 struct btrfs_device;
 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len,
+struct btrfs_io_context *bioc, u64 stripe_len,
 int mirror_num, int generic_io);
 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len);
+struct btrfs_io_context *bioc, u64 stripe_len);
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
 u64 logical);
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 stripe_len,
+struct btrfs_io_context *bioc, u64 stripe_len,
 struct btrfs_device *scrub_dev,
 unsigned long *dbitmap, int stripe_nsectors);
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
 struct btrfs_raid_bio *
 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
-struct btrfs_bio *bbio, u64 length);
+struct btrfs_io_context *bioc, u64 length);
 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
...
@@ -227,7 +227,7 @@ int btree_readahead_hook(struct extent_buffer *eb, int err)
 }
 static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
-struct btrfs_bio *bbio)
+struct btrfs_io_context *bioc)
 {
 struct btrfs_fs_info *fs_info = dev->fs_info;
 int ret;
@@ -275,11 +275,11 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
 kref_init(&zone->refcnt);
 zone->elems = 0;
 zone->device = dev; /* our device always sits at index 0 */
-for (i = 0; i < bbio->num_stripes; ++i) {
+for (i = 0; i < bioc->num_stripes; ++i) {
 /* bounds have already been checked */
-zone->devs[i] = bbio->stripes[i].dev;
+zone->devs[i] = bioc->stripes[i].dev;
 }
-zone->ndevs = bbio->num_stripes;
+zone->ndevs = bioc->num_stripes;
 spin_lock(&fs_info->reada_lock);
 ret = radix_tree_insert(&dev->reada_zones,
@@ -309,7 +309,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 int ret;
 struct reada_extent *re = NULL;
 struct reada_extent *re_exist = NULL;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 struct btrfs_device *dev;
 struct btrfs_device *prev_dev;
 u64 length;
@@ -345,28 +345,28 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 */
 length = fs_info->nodesize;
 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
-&length, &bbio, 0);
-if (ret || !bbio || length < fs_info->nodesize)
+&length, &bioc, 0);
+if (ret || !bioc || length < fs_info->nodesize)
 goto error;
-if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
+if (bioc->num_stripes > BTRFS_MAX_MIRRORS) {
 btrfs_err(fs_info,
 "readahead: more than %d copies not supported",
 BTRFS_MAX_MIRRORS);
 goto error;
 }
-real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
 for (nzones = 0; nzones < real_stripes; ++nzones) {
 struct reada_zone *zone;
-dev = bbio->stripes[nzones].dev;
+dev = bioc->stripes[nzones].dev;
 /* cannot read ahead on missing device. */
 if (!dev->bdev)
 continue;
-zone = reada_find_zone(dev, logical, bbio);
+zone = reada_find_zone(dev, logical, bioc);
 if (!zone)
 continue;
@@ -464,7 +464,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 if (!have_zone)
 goto error;
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 return re;
 error:
@@ -488,7 +488,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 kref_put(&zone->refcnt, reada_zone_release);
 spin_unlock(&fs_info->reada_lock);
 }
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 kfree(re);
 return re_exist;
 }
...
@@ -57,7 +57,7 @@ struct scrub_ctx;
 struct scrub_recover {
 refcount_t refs;
-struct btrfs_bio *bbio;
+struct btrfs_io_context *bioc;
 u64 map_length;
 };
@@ -254,7 +254,7 @@ static void scrub_put_ctx(struct scrub_ctx *sctx);
 static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
 {
 return spage->recover &&
-(spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+(spage->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 }
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -798,7 +798,7 @@ static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
 {
 if (refcount_dec_and_test(&recover->refs)) {
 btrfs_bio_counter_dec(fs_info);
-btrfs_put_bbio(recover->bbio);
+btrfs_put_bioc(recover->bioc);
 kfree(recover);
 }
 }
@@ -1027,8 +1027,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 sblock_other = sblocks_for_recheck + mirror_index;
 } else {
 struct scrub_recover *r = sblock_bad->pagev[0]->recover;
-int max_allowed = r->bbio->num_stripes -
-r->bbio->num_tgtdevs;
+int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
 if (mirror_index >= max_allowed)
 break;
@@ -1218,14 +1217,14 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 return 0;
 }
-static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
+static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
 {
-if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
 return 2;
-else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
+else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
 return 3;
 else
-return (int)bbio->num_stripes;
+return (int)bioc->num_stripes;
 }
 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
@@ -1269,7 +1268,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 u64 flags = original_sblock->pagev[0]->flags;
 u64 have_csum = original_sblock->pagev[0]->have_csum;
 struct scrub_recover *recover;
-struct btrfs_bio *bbio;
+struct btrfs_io_context *bioc;
 u64 sublen;
 u64 mapped_length;
 u64 stripe_offset;
@@ -1288,7 +1287,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 while (length > 0) {
 sublen = min_t(u64, length, fs_info->sectorsize);
 mapped_length = sublen;
-bbio = NULL;
+bioc = NULL;
 /*
 * With a length of sectorsize, each returned stripe represents
@@ -1296,27 +1295,27 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 */
 btrfs_bio_counter_inc_blocked(fs_info);
 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
-logical, &mapped_length, &bbio);
-if (ret || !bbio || mapped_length < sublen) {
-btrfs_put_bbio(bbio);
+logical, &mapped_length, &bioc);
+if (ret || !bioc || mapped_length < sublen) {
+btrfs_put_bioc(bioc);
 btrfs_bio_counter_dec(fs_info);
 return -EIO;
 }
 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
 if (!recover) {
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 btrfs_bio_counter_dec(fs_info);
 return -ENOMEM;
 }
 refcount_set(&recover->refs, 1);
-recover->bbio = bbio;
+recover->bioc = bioc;
 recover->map_length = mapped_length;
 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
-nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
+nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
 for (mirror_index = 0; mirror_index < nmirrors;
 mirror_index++) {
@@ -1348,17 +1347,17 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 sctx->fs_info->csum_size);
 scrub_stripe_index_and_offset(logical,
-bbio->map_type,
-bbio->raid_map,
+bioc->map_type,
+bioc->raid_map,
 mapped_length,
-bbio->num_stripes -
-bbio->num_tgtdevs,
+bioc->num_stripes -
+bioc->num_tgtdevs,
 mirror_index,
 &stripe_index,
 &stripe_offset);
-spage->physical = bbio->stripes[stripe_index].physical +
+spage->physical = bioc->stripes[stripe_index].physical +
 stripe_offset;
-spage->dev = bbio->stripes[stripe_index].dev;
+spage->dev = bioc->stripes[stripe_index].dev;
 BUG_ON(page_index >= original_sblock->page_count);
 spage->physical_for_dev_replace =
@@ -1401,7 +1400,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 bio->bi_end_io = scrub_bio_wait_endio;
 mirror_num = spage->sblock->pagev[0]->mirror_num;
-ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
+ret = raid56_parity_recover(fs_info, bio, spage->recover->bioc,
 spage->recover->map_length,
 mirror_num, 0);
 if (ret)
@@ -2203,7 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 struct btrfs_fs_info *fs_info = sctx->fs_info;
 u64 length = sblock->page_count * PAGE_SIZE;
 u64 logical = sblock->pagev[0]->logical;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 struct bio *bio;
 struct btrfs_raid_bio *rbio;
 int ret;
@@ -2211,19 +2210,19 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 btrfs_bio_counter_inc_blocked(fs_info);
 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
-&length, &bbio);
-if (ret || !bbio || !bbio->raid_map)
-goto bbio_out;
+&length, &bioc);
+if (ret || !bioc || !bioc->raid_map)
+goto bioc_out;
 if (WARN_ON(!sctx->is_dev_replace ||
-!(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
+!(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
 /*
 * We shouldn't be scrubbing a missing device. Even for dev
 * replace, we should only get here for RAID 5/6. We either
 * managed to mount something with no mirrors remaining or
 * there's a bug in scrub_remap_extent()/btrfs_map_block().
 */
-goto bbio_out;
+goto bioc_out;
 }
 bio = btrfs_io_bio_alloc(0);
@@ -2231,7 +2230,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 bio->bi_private = sblock;
 bio->bi_end_io = scrub_missing_raid56_end_io;
-rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
+rbio = raid56_alloc_missing_rbio(fs_info, bio, bioc, length);
 if (!rbio)
 goto rbio_out;
@@ -2249,9 +2248,9 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 rbio_out:
 bio_put(bio);
-bbio_out:
+bioc_out:
 btrfs_bio_counter_dec(fs_info);
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 spin_lock(&sctx->stat_lock);
 sctx->stat.malloc_errors++;
 spin_unlock(&sctx->stat_lock);
@@ -2826,7 +2825,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 struct btrfs_fs_info *fs_info = sctx->fs_info;
 struct bio *bio;
 struct btrfs_raid_bio *rbio;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 u64 length;
 int ret;
@@ -2838,16 +2837,16 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 btrfs_bio_counter_inc_blocked(fs_info);
 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
-&length, &bbio);
-if (ret || !bbio || !bbio->raid_map)
-goto bbio_out;
+&length, &bioc);
+if (ret || !bioc || !bioc->raid_map)
+goto bioc_out;
 bio = btrfs_io_bio_alloc(0);
 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
 bio->bi_private = sparity;
 bio->bi_end_io = scrub_parity_bio_endio;
-rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
+rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bioc,
 length, sparity->scrub_dev,
 sparity->dbitmap,
 sparity->nsectors);
@@ -2860,9 +2859,9 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 rbio_out:
 bio_put(bio);
-bbio_out:
+bioc_out:
 btrfs_bio_counter_dec(fs_info);
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
 sparity->nsectors);
 spin_lock(&sctx->stat_lock);
@@ -2901,7 +2900,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 struct btrfs_root *root = fs_info->extent_root;
 struct btrfs_root *csum_root = fs_info->csum_root;
 struct btrfs_extent_item *extent;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 u64 flags;
 int ret;
 int slot;
@@ -3044,22 +3043,22 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 extent_len);
 mapped_length = extent_len;
-bbio = NULL;
+bioc = NULL;
 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
-extent_logical, &mapped_length, &bbio,
+extent_logical, &mapped_length, &bioc,
 0);
 if (!ret) {
-if (!bbio || mapped_length < extent_len)
+if (!bioc || mapped_length < extent_len)
 ret = -EIO;
 }
 if (ret) {
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
 goto out;
 }
-extent_physical = bbio->stripes[0].physical;
-extent_mirror_num = bbio->mirror_num;
-extent_dev = bbio->stripes[0].dev;
-btrfs_put_bbio(bbio);
+extent_physical = bioc->stripes[0].physical;
+extent_mirror_num = bioc->mirror_num;
+extent_dev = bioc->stripes[0].dev;
+btrfs_put_bioc(bioc);
 ret = btrfs_lookup_csums_range(csum_root,
 extent_logical,
@@ -4309,20 +4308,20 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 int *extent_mirror_num)
 {
 u64 mapped_length;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 int ret;
 mapped_length = extent_len;
 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
-&mapped_length, &bbio, 0);
-if (ret || !bbio || mapped_length < extent_len ||
-!bbio->stripes[0].dev->bdev) {
-btrfs_put_bbio(bbio);
+&mapped_length, &bioc, 0);
+if (ret || !bioc || mapped_length < extent_len ||
+!bioc->stripes[0].dev->bdev) {
+btrfs_put_bioc(bioc);
 return;
 }
-*extent_physical = bbio->stripes[0].physical;
-*extent_mirror_num = bbio->mirror_num;
-*extent_dev = bbio->stripes[0].dev;
-btrfs_put_bbio(bbio);
+*extent_physical = bioc->stripes[0].physical;
+*extent_mirror_num = bioc->mirror_num;
+*extent_dev = bioc->stripes[0].dev;
+btrfs_put_bioc(bioc);
 }
This diff is collapsed and not shown here.
@@ -306,11 +306,11 @@ struct btrfs_fs_devices {
 /*
 * we need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
-* Really, what we need is a btrfs_bio structure that has this info
+* Really, what we need is a btrfs_io_context structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet. We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios. We'll cram as much of
-* struct btrfs_bio as we can into this over time.
+* struct btrfs_io_context as we can into this over time.
 */
 struct btrfs_io_bio {
 unsigned int mirror_num;
@@ -339,13 +339,29 @@ static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
 }
 }
-struct btrfs_bio_stripe {
+struct btrfs_io_stripe {
 struct btrfs_device *dev;
 u64 physical;
 u64 length; /* only used for discard mappings */
 };
-struct btrfs_bio {
+/*
+ * Context for IO subsmission for device stripe.
+ *
+ * - Track the unfinished mirrors for mirror based profiles
+ *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
+ *
+ * - Contain the logical -> physical mapping info
+ *   Used by submit_stripe_bio() for mapping logical bio
+ *   into physical device address.
+ *
+ * - Contain device replace info
+ *   Used by handle_ops_on_dev_replace() to copy logical bios
+ *   into the new device.
+ *
+ * - Contain RAID56 full stripe logical bytenrs
+ */
+struct btrfs_io_context {
 refcount_t refs;
 atomic_t stripes_pending;
 struct btrfs_fs_info *fs_info;
@@ -365,7 +381,7 @@ struct btrfs_bio {
 * so raid_map[0] is the start of our full stripe
 */
 u64 *raid_map;
-struct btrfs_bio_stripe stripes[];
+struct btrfs_io_stripe stripes[];
 };
 struct btrfs_device_info {
@@ -400,11 +416,11 @@ struct map_lookup {
 int num_stripes;
 int sub_stripes;
 int verified_stripes; /* For mount time dev extent verification */
-struct btrfs_bio_stripe stripes[];
+struct btrfs_io_stripe stripes[];
 };
 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
-(sizeof(struct btrfs_bio_stripe) * (n)))
+(sizeof(struct btrfs_io_stripe) * (n)))
 struct btrfs_balance_args;
 struct btrfs_balance_progress;
@@ -441,14 +457,14 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
 }
 }
-void btrfs_get_bbio(struct btrfs_bio *bbio);
-void btrfs_put_bbio(struct btrfs_bio *bbio);
+void btrfs_get_bioc(struct btrfs_io_context *bioc);
+void btrfs_put_bioc(struct btrfs_io_context *bioc);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret, int mirror_num);
+struct btrfs_io_context **bioc_ret, int mirror_num);
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret);
+struct btrfs_io_context **bioc_ret);
 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
 enum btrfs_map_op op, u64 logical,
 struct btrfs_io_geometry *io_geom);
...
@@ -1637,27 +1637,27 @@ int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 len
 static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
 struct blk_zone *zone)
 {
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
 u64 mapped_length = PAGE_SIZE;
 unsigned int nofs_flag;
 int nmirrors;
 int i, ret;
 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
-&mapped_length, &bbio);
-if (ret || !bbio || mapped_length < PAGE_SIZE) {
-btrfs_put_bbio(bbio);
+&mapped_length, &bioc);
+if (ret || !bioc || mapped_length < PAGE_SIZE) {
+btrfs_put_bioc(bioc);
 return -EIO;
 }
-if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
+if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 return -EINVAL;
 nofs_flag = memalloc_nofs_save();
-nmirrors = (int)bbio->num_stripes;
+nmirrors = (int)bioc->num_stripes;
 for (i = 0; i < nmirrors; i++) {
-u64 physical = bbio->stripes[i].physical;
-struct btrfs_device *dev = bbio->stripes[i].dev;
+u64 physical = bioc->stripes[i].physical;
+struct btrfs_device *dev = bioc->stripes[i].dev;
 /* Missing device */
 if (!dev->bdev)
...
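Taken together, the call sites above all follow the same map/use/put pattern before and after the rename; only the type and helper names change. A minimal sketch of that pattern with the renamed API, assuming a hypothetical caller (error handling trimmed; not code from this patch):

int sketch_lookup_stripes(struct btrfs_fs_info *fs_info, u64 logical)
{
        struct btrfs_io_context *bioc = NULL;   /* was: struct btrfs_bio *bbio */
        u64 length = fs_info->nodesize;
        int ret;

        /* Map a logical range to its physical stripes; fills @bioc. */
        ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length, &bioc, 0);
        if (ret || !bioc)
                return ret ? ret : -EIO;

        /* ... use bioc->num_stripes, bioc->stripes[i].dev, .physical ... */

        btrfs_put_bioc(bioc);                   /* was: btrfs_put_bbio(bbio) */
        return 0;
}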