提交 b3a6c38a 编写于 作者: C Christoph Hellwig 提交者: Zheng Zengkai

md/raid6: refactor raid5_read_one_chunk

mainline inclusion
from mainline-v5.12-rc1
commit e82ed3a4
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I587H6
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e82ed3a4fbb54b2d7dcb2a7733520f3e10b97abf

-------------------------------

Refactor raid5_read_one_chunk so that all simple checks are done
before allocating the bio.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflict:
	drivers/md/raid5.c
Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 7f625ab4
...@@ -5398,97 +5398,79 @@ static void raid5_align_endio(struct bio *bi) ...@@ -5398,97 +5398,79 @@ static void raid5_align_endio(struct bio *bi)
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{ {
struct r5conf *conf = mddev->private; struct r5conf *conf = mddev->private;
int dd_idx; struct bio *align_bio;
struct bio* align_bi;
struct md_rdev *rdev; struct md_rdev *rdev;
sector_t end_sector; sector_t sector, end_sector, first_bad;
int bad_sectors, dd_idx;
struct md_io_acct *md_io_acct; struct md_io_acct *md_io_acct;
if (!in_chunk_boundary(mddev, raid_bio)) { if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("%s: non aligned\n", __func__); pr_debug("%s: non aligned\n", __func__);
return 0; return 0;
} }
/*
* use bio_clone_fast to make a copy of the bio
*/
align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
if (!align_bi)
return 0;
md_io_acct = container_of(align_bi, struct md_io_acct, bio_clone);
raid_bio->bi_next = (void *)rdev;
if (blk_queue_io_stat(raid_bio->bi_disk->queue))
md_io_acct->start_time = bio_start_io_acct(raid_bio);
md_io_acct->orig_bio = raid_bio;
/*
* set bi_end_io to a new function, and set bi_private to the
* original bio.
*/
align_bi->bi_end_io = raid5_align_endio;
align_bi->bi_private = md_io_acct;
/*
* compute position
*/
align_bi->bi_iter.bi_sector =
raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
0, &dd_idx, NULL);
end_sector = bio_end_sector(align_bi); sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
&dd_idx, NULL);
end_sector = bio_end_sector(raid_bio);
rcu_read_lock(); rcu_read_lock();
if (r5c_big_stripe_cached(conf, sector))
goto out_rcu_unlock;
rdev = rcu_dereference(conf->disks[dd_idx].replacement); rdev = rcu_dereference(conf->disks[dd_idx].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags) || if (!rdev || test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset < end_sector) { rdev->recovery_offset < end_sector) {
rdev = rcu_dereference(conf->disks[dd_idx].rdev); rdev = rcu_dereference(conf->disks[dd_idx].rdev);
if (rdev && if (!rdev)
(test_bit(Faulty, &rdev->flags) || goto out_rcu_unlock;
!(test_bit(In_sync, &rdev->flags) || if (test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset >= end_sector))) !(test_bit(In_sync, &rdev->flags) ||
rdev = NULL; rdev->recovery_offset >= end_sector))
goto out_rcu_unlock;
} }
if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { atomic_inc(&rdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
bio_put(align_bi);
align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
raid_bio->bi_next = (void *)rdev;
if (blk_queue_io_stat(raid_bio->bi_disk->queue))
md_io_acct->start_time = bio_start_io_acct(raid_bio);
md_io_acct->orig_bio = raid_bio;
bio_set_dev(align_bio, rdev->bdev);
align_bio->bi_end_io = raid5_align_endio;
align_bio->bi_private = md_io_acct;
align_bio->bi_iter.bi_sector = sector;
if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
&bad_sectors)) {
bio_put(align_bio);
rdev_dec_pending(rdev, mddev);
return 0; return 0;
} }
if (rdev) { /* No reshape active, so we can trust rdev->data_offset */
sector_t first_bad; align_bio->bi_iter.bi_sector += rdev->data_offset;
int bad_sectors;
atomic_inc(&rdev->nr_pending); spin_lock_irq(&conf->device_lock);
rcu_read_unlock(); wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
raid_bio->bi_next = (void*)rdev; conf->device_lock);
bio_set_dev(align_bi, rdev->bdev); atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
bio_sectors(align_bi),
&first_bad, &bad_sectors)) {
bio_put(align_bi);
rdev_dec_pending(rdev, mddev);
return 0;
}
/* No reshape active, so we can trust rdev->data_offset */
align_bi->bi_iter.bi_sector += rdev->data_offset;
spin_lock_irq(&conf->device_lock); if (mddev->gendisk)
wait_event_lock_irq(conf->wait_for_quiescent, trace_block_bio_remap(align_bio->bi_disk->queue,
conf->quiesce == 0, align_bio, disk_devt(mddev->gendisk),
conf->device_lock); raid_bio->bi_iter.bi_sector);
atomic_inc(&conf->active_aligned_reads); submit_bio_noacct(align_bio);
spin_unlock_irq(&conf->device_lock); return 1;
if (mddev->gendisk) out_rcu_unlock:
trace_block_bio_remap(align_bi->bi_disk->queue, rcu_read_unlock();
align_bi, disk_devt(mddev->gendisk), return 0;
raid_bio->bi_iter.bi_sector);
submit_bio_noacct(align_bi);
return 1;
} else {
rcu_read_unlock();
bio_put(align_bi);
return 0;
}
} }
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册