diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c7c5b2693fc96ac2033524c067648c470bede1e9..0f1b78b386491e3cbddd7bd5cbd3ff8a841dbf02 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -301,27 +301,18 @@ static void reschedule_retry(struct r10bio *r10_bio)
 static void raid_end_bio_io(struct r10bio *r10_bio)
 {
 	struct bio *bio = r10_bio->master_bio;
-	int done;
 	struct r10conf *conf = r10_bio->mddev->private;
 
-	if (bio->bi_phys_segments) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
-		bio->bi_phys_segments--;
-		done = (bio->bi_phys_segments == 0);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
-	} else
-		done = 1;
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 		bio->bi_error = -EIO;
-	if (done) {
-		bio_endio(bio);
-		/*
-		 * Wake up any possible resync thread that waits for the device
-		 * to go idle.
-		 */
-		allow_barrier(conf);
-	}
+
+	bio_endio(bio);
+	/*
+	 * Wake up any possible resync thread that waits for the device
+	 * to go idle.
+	 */
+	allow_barrier(conf);
+
 	free_r10bio(r10_bio);
 }
 
@@ -985,6 +976,15 @@ static void wait_barrier(struct r10conf *conf)
 	spin_unlock_irq(&conf->resync_lock);
 }
 
+static void inc_pending(struct r10conf *conf)
+{
+	/* The current request requires multiple r10_bio, so
+	 * we need to increment the pending count.
+	 */
+	WARN_ON(!atomic_read(&conf->nr_pending));
+	atomic_inc(&conf->nr_pending);
+}
+
 static void allow_barrier(struct r10conf *conf)
 {
 	if ((atomic_dec_and_test(&conf->nr_pending)) ||
@@ -1162,12 +1162,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		sectors_handled = (r10_bio->sector + max_sectors
 				   - bio->bi_iter.bi_sector);
 		r10_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		inc_pending(conf);
+		bio_inc_remaining(bio);
 		/*
 		 * Cannot call generic_make_request directly as that will be
 		 * queued in __generic_make_request and subsequent
@@ -1262,9 +1258,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	 * on which we have seen a write error, we want to avoid
 	 * writing to those blocks.  This potentially requires several
 	 * writes to write around the bad blocks.  Each set of writes
-	 * gets its own r10_bio with a set of bios attached.  The number
-	 * of r10_bios is recored in bio->bi_phys_segments just as with
-	 * the read case.
+	 * gets its own r10_bio with a set of bios attached.
 	 */
 
 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
@@ -1495,15 +1489,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	 */
 
 	if (sectors_handled < bio_sectors(bio)) {
-		/* We need another r10_bio and it needs to be counted
-		 * in bio->bi_phys_segments.
-		 */
-		spin_lock_irq(&conf->device_lock);
-		if (bio->bi_phys_segments == 0)
-			bio->bi_phys_segments = 2;
-		else
-			bio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		/* We need another r10_bio and it needs to be counted */
+		inc_pending(conf);
+		bio_inc_remaining(bio);
 		one_write_done(r10_bio);
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
@@ -1532,16 +1520,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
 
-	/*
-	 * We might need to issue multiple reads to different devices if there
-	 * are bad blocks around, so we keep track of the number of reads in
-	 * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
-	 * no locking will be needed when the request completes.  If it is
-	 * non-zero, then it is the number of not-completed requests.
-	 */
-	bio->bi_phys_segments = 0;
-	bio_clear_flag(bio, BIO_SEG_VALID);
-
 	if (bio_data_dir(bio) == READ)
 		raid10_read_request(mddev, bio, r10_bio);
 	else
@@ -2693,12 +2671,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 			r10_bio->sector + max_sectors
 			- mbio->bi_iter.bi_sector;
 		r10_bio->sectors = max_sectors;
-		spin_lock_irq(&conf->device_lock);
-		if (mbio->bi_phys_segments == 0)
-			mbio->bi_phys_segments = 2;
-		else
-			mbio->bi_phys_segments++;
-		spin_unlock_irq(&conf->device_lock);
+		bio_inc_remaining(mbio);
+		inc_pending(conf);
 		generic_make_request(bio);
 
 		r10_bio = mempool_alloc(conf->r10bio_pool,
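
The counting scheme this patch adopts may be easier to see in isolation. Instead of tracking split requests in bio->bi_phys_segments under device_lock, it relies on the block layer's own atomic reference count: bio_inc_remaining() bumps the bio's __bi_remaining, each bio_endio() call drops it, and only the final drop actually completes the master bio. Below is a minimal userspace sketch of that idea using C11 atomics; the toy_* names are hypothetical stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* Toy model of the remaining-count pattern (hypothetical names, not
 * the kernel API).  A master request may be split into several
 * sub-requests; completion must fire exactly once, when the last
 * sub-request finishes. */
struct toy_bio {
	atomic_int remaining;	/* like bio->__bi_remaining, starts at 1 */
};

static void toy_bio_init(struct toy_bio *bio)
{
	atomic_init(&bio->remaining, 1);
}

/* Like bio_inc_remaining(): called before issuing an extra split. */
static void toy_bio_inc_remaining(struct toy_bio *bio)
{
	atomic_fetch_add(&bio->remaining, 1);
}

/* Like bio_endio(): each split calls this; only the call that drops
 * the count to zero completes the master request.  No spinlock is
 * needed, which is the point of the patch above. */
static void toy_bio_endio(struct toy_bio *bio)
{
	if (atomic_fetch_sub(&bio->remaining, 1) == 1)
		printf("master bio completed\n");
}

int main(void)
{
	struct toy_bio bio;

	toy_bio_init(&bio);
	toy_bio_inc_remaining(&bio);	/* request split into two r10_bios */

	toy_bio_endio(&bio);	/* first split done: no completion yet */
	toy_bio_endio(&bio);	/* last split done: completes the master */
	return 0;
}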