Commit e3620a3a authored by Jonathan Brassow, committed by NeilBrown

MD RAID5: Avoid accessing gendisk or queue structs when not available

MD RAID5:  Fix kernel oops when RAID4/5/6 is used via device-mapper

Commit a9add5d9 (v3.8-rc1) added blktrace calls to the RAID4/5/6 driver.
However, when device-mapper is used to create RAID4/5/6 arrays, the
mddev->gendisk and mddev->queue fields are not setup.  Therefore, calling
things like trace_block_bio_remap will cause a kernel oops.  This patch
conditionalizes those calls on whether the proper fields exist to make
the calls.  (Device-mapper will call trace_block_bio_remap on its own.)

This patch is suitable for the 3.8.y stable kernel.

Cc: stable@vger.kernel.org (v3.8+)
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Parent ce7d363a
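Before the diff itself, here is a minimal stand-alone sketch of the guard pattern the patch applies. This is illustrative user-space C, not kernel code: struct mddev_model, struct gendisk_model and trace_remap() are invented stand-ins for the real mddev/gendisk structures and the blktrace calls. The point is simply that the trace call is skipped when the field that device-mapper leaves unset is NULL; as the commit message notes, device-mapper issues its own trace_block_bio_remap, so nothing is lost in that configuration.

#include <stdio.h>

struct gendisk_model { int devt; };        /* stand-in for struct gendisk */

struct mddev_model {
	struct gendisk_model *gendisk;     /* NULL when the array is built by device-mapper */
};

/* stand-in for trace_block_bio_remap(); the real call would oops on a NULL gendisk */
static void trace_remap(int devt, unsigned long long sector)
{
	printf("remap dev %d sector %llu\n", devt, sector);
}

static void submit_io(struct mddev_model *mddev, unsigned long long sector)
{
	/* the patch's pattern: only trace when the field actually exists */
	if (mddev->gendisk)
		trace_remap(mddev->gendisk->devt, sector);
	/* ...submit the bio (generic_make_request() in the real driver)... */
}

int main(void)
{
	struct gendisk_model gd = { .devt = 9 };
	struct mddev_model native_md = { .gendisk = &gd };   /* native md array: field set   */
	struct mddev_model dm_raid   = { .gendisk = NULL };  /* dm-raid target: field unset  */

	submit_io(&native_md, 128);   /* prints the trace line */
	submit_io(&dm_raid, 128);     /* skips tracing instead of dereferencing NULL */
	return 0;
}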
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -674,9 +674,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
-			trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
-					      bi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+						      bi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(bi);
 		}
 		if (rrdev) {
@@ -704,9 +706,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
 			rbi->bi_next = NULL;
-			trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
-					      rbi, disk_devt(conf->mddev->gendisk),
-					      sh->dev[i].sector);
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+						      rbi, disk_devt(conf->mddev->gendisk),
+						      sh->dev[i].sector);
 			generic_make_request(rbi);
 		}
 		if (!rdev && !rrdev) {
@@ -2835,8 +2838,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	set_bit(STRIPE_HANDLE, &sh->state);
 	if (rmw < rcw && rmw > 0) {
 		/* prefer read-modify-write, but need to get some data */
-		blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
-			 (unsigned long long)sh->sector, rmw);
+		if (conf->mddev->queue)
+			blk_add_trace_msg(conf->mddev->queue,
+					  "raid5 rmw %llu %d",
+					  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
 			if ((dev->towrite || i == sh->pd_idx) &&
@@ -2886,7 +2891,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 			}
 		}
 	}
-	if (rcw)
+	if (rcw && conf->mddev->queue)
 		blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
 				  (unsigned long long)sh->sector,
 				  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3993,9 +3998,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);

-		trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
-				      align_bi, disk_devt(mddev->gendisk),
-				      raid_bio->bi_sector);
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+					      align_bi, disk_devt(mddev->gendisk),
+					      raid_bio->bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4089,7 +4095,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 		}
 		spin_unlock_irq(&conf->device_lock);
 	}
-	trace_block_unplug(mddev->queue, cnt, !from_schedule);
+	if (mddev->queue)
+		trace_block_unplug(mddev->queue, cnt, !from_schedule);
 	kfree(cb);
 }