Commit cea9c228 authored by NeilBrown

md: add explicit method to signal the end of a reshape.

Currently raid5 (the only module that supports restriping)
notices that the reshape has finished by sync_request being
given a large value, and handles any cleanup then.

This patch changes it so md_check_recovery calls into an
explicit finish_reshape method as well.

The clean-up done from sync_request handles things that need to
be done promptly, typically things local to the raid5_conf_t
structure.

The "finish_reshape" method is called under the mddev_lock
so it can do things involving reconfiguring the device.

This allows us to get rid of md_set_array_sectors_lock, which
would have caused a deadlock if you tried to stop an array
while a reshape was happening.

Signed-off-by: NeilBrown <neilb@suse.de>
Parent 7ec05478
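
To make the division of labour concrete, here is a minimal user-space model of the two-phase pattern the patch introduces (everything in it, the demo_* names, the pthread mutex standing in for mddev_lock, and the printf bodies, is an illustrative stand-in, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct demo_mddev;

struct demo_pers {
	/* optional hook; personalities that cannot reshape leave it NULL */
	void (*finish_reshape)(struct demo_mddev *mddev);
};

struct demo_mddev {
	pthread_mutex_t reconfig_lock;   /* stands in for mddev_lock */
	const struct demo_pers *pers;
	int reshape_was_running;
};

/* Phase 1: prompt, conf-local cleanup done from the resync path.
 * Runs WITHOUT the reconfiguration lock, so it must stay away from
 * anything that reconfiguration also touches. */
static void end_reshape(struct demo_mddev *mddev)
{
	(void)mddev;
	printf("end_reshape: immediate, lock-free cleanup\n");
}

/* Phase 2: an md_check_recovery-style caller takes the lock first,
 * so the hook may safely resize or otherwise reconfigure the device. */
static void check_recovery(struct demo_mddev *mddev)
{
	pthread_mutex_lock(&mddev->reconfig_lock);
	if (mddev->reshape_was_running && mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);
	pthread_mutex_unlock(&mddev->reconfig_lock);
}

static void demo_finish_reshape(struct demo_mddev *mddev)
{
	(void)mddev;
	printf("finish_reshape: runs under the reconfiguration lock\n");
}

int main(void)
{
	const struct demo_pers pers = { .finish_reshape = demo_finish_reshape };
	struct demo_mddev mddev = {
		.reconfig_lock = PTHREAD_MUTEX_INITIALIZER,
		.pers = &pers,
		.reshape_was_running = 1,
	};

	end_reshape(&mddev);     /* what sync_request still does */
	check_recovery(&mddev);  /* where the locked work now lives */
	return 0;
}

The deadlock the message refers to follows from this contract, presumably because stopping an array already runs with mddev_lock held: a reshape wind-down that reached md_set_array_sectors_lock() from inside the stop path would block trying to re-acquire a lock its own caller holds. With the split, sync_request's portion stays lock-free and the locked portion runs only where md_check_recovery has already taken the lock.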
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5073,14 +5073,6 @@ void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
 }
 EXPORT_SYMBOL(md_set_array_sectors);
 
-void md_set_array_sectors_lock(mddev_t *mddev, sector_t array_sectors)
-{
-	mddev_lock(mddev);
-	md_set_array_sectors(mddev, array_sectors);
-	mddev_unlock(mddev);
-}
-EXPORT_SYMBOL(md_set_array_sectors_lock);
-
 static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
 	mdk_rdev_t *rdev;
@@ -6641,6 +6633,9 @@ void md_check_recovery(mddev_t *mddev)
 				sysfs_notify(&mddev->kobj, NULL,
 					     "degraded");
 			}
+			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+			    mddev->pers->finish_reshape)
+				mddev->pers->finish_reshape(mddev);
 			md_update_sb(mddev, 1);
 
 			/* if array is no-longer degraded, then any saved_raid_disk
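The guard at the new call site, test_bit(MD_RECOVERY_RESHAPE, ...) && mddev->pers->finish_reshape, follows the usual convention for optional ops-table members: personalities that never set the pointer are skipped rather than crashed into. A small stand-alone sketch of that convention (all names here are hypothetical, not md symbols):

#include <stdio.h>

/* Hypothetical ops table: hooks a personality does not implement stay NULL. */
struct pers_ops {
	void (*finish_reshape)(void *dev);  /* optional */
};

static void recovery_step(const struct pers_ops *ops, void *dev, int reshaping)
{
	/* Test the state first, then the pointer, the same shape as the
	 * md_check_recovery hunk above. */
	if (reshaping && ops->finish_reshape)
		ops->finish_reshape(dev);
	else
		printf("hook absent or no reshape: nothing to do\n");
}

static void demo_hook(void *dev)
{
	(void)dev;
	printf("finish_reshape invoked\n");
}

int main(void)
{
	const struct pers_ops raid5_like = { .finish_reshape = demo_hook };
	const struct pers_ops no_reshape = { 0 };  /* hook left NULL */

	recovery_step(&raid5_like, NULL, 1);  /* hook runs */
	recovery_step(&no_reshape, NULL, 1);  /* safely skipped */
	return 0;
}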
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -317,6 +317,7 @@ struct mdk_personality
 	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
 	int (*check_reshape) (mddev_t *mddev);
 	int (*start_reshape) (mddev_t *mddev);
+	void (*finish_reshape) (mddev_t *mddev);
 	int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
 	/* quiesce moves between quiescence states
 	 * 0 - fully active
@@ -433,4 +434,3 @@ extern void md_new_event(mddev_t *mddev);
 extern int md_allow_write(mddev_t *mddev);
 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
-extern void md_set_array_sectors_lock(mddev_t *mddev, sector_t array_sectors);
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3850,6 +3850,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
 		unplug_slaves(mddev);
+
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
 			return 0;
@@ -4836,43 +4837,49 @@ static int raid5_start_reshape(mddev_t *mddev)
 
 static void end_reshape(raid5_conf_t *conf)
 {
-	struct block_device *bdev;
 
 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		mddev_t *mddev = conf->mddev;
 
-		md_set_array_sectors_lock(mddev, raid5_size(mddev, 0,
-							    conf->raid_disks));
-		set_capacity(mddev->gendisk, mddev->array_sectors);
-		mddev->changed = 1;
-		conf->previous_raid_disks = conf->raid_disks;
-
-		bdev = bdget_disk(conf->mddev->gendisk, 0);
-		if (bdev) {
-			mutex_lock(&bdev->bd_inode->i_mutex);
-			i_size_write(bdev->bd_inode,
-				     (loff_t)conf->mddev->array_sectors << 9);
-			mutex_unlock(&bdev->bd_inode->i_mutex);
-			bdput(bdev);
-		}
 		spin_lock_irq(&conf->device_lock);
+		conf->previous_raid_disks = conf->raid_disks;
 		conf->expand_progress = MaxSector;
 		spin_unlock_irq(&conf->device_lock);
-		conf->mddev->reshape_position = MaxSector;
 
 		/* read-ahead size must cover two whole stripes, which is
 		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
 		 */
 		{
-			int data_disks = conf->previous_raid_disks - conf->max_degraded;
-			int stripe = data_disks *
-				(conf->mddev->chunk_size / PAGE_SIZE);
+			int data_disks = conf->raid_disks - conf->max_degraded;
+			int stripe = data_disks * (conf->chunk_size
+						   / PAGE_SIZE);
 			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
 		}
 	}
 }
 
+static void raid5_finish_reshape(mddev_t *mddev)
+{
+	struct block_device *bdev;
+
+	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+
+		md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
+		set_capacity(mddev->gendisk, mddev->array_sectors);
+		mddev->changed = 1;
+		mddev->reshape_position = MaxSector;
+
+		bdev = bdget_disk(mddev->gendisk, 0);
+		if (bdev) {
+			mutex_lock(&bdev->bd_inode->i_mutex);
+			i_size_write(bdev->bd_inode,
+				     (loff_t)mddev->array_sectors << 9);
+			mutex_unlock(&bdev->bd_inode->i_mutex);
+			bdput(bdev);
+		}
+	}
+}
+
 static void raid5_quiesce(mddev_t *mddev, int state)
 {
 	raid5_conf_t *conf = mddev_to_conf(mddev);
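Two pieces of arithmetic in the hunk above are easy to misread: i_size_write(..., (loff_t)mddev->array_sectors << 9) converts 512-byte sectors to bytes, and the read-ahead target is two full stripes, i.e. 2 * data_disks * (chunk_size / PAGE_SIZE) pages. A stand-alone check with illustrative numbers (none of which come from the patch):

#include <stdio.h>

int main(void)
{
	/* Illustrative values only, not taken from the patch. */
	unsigned long long array_sectors = 3907029168ULL; /* ~1.8 TiB */
	int raid_disks   = 6;
	int max_degraded = 1;          /* raid5: one disk's worth of parity */
	int chunk_size   = 64 * 1024;  /* 64 KiB chunks */
	int page_size    = 4096;

	/* i_size_write() argument: sectors are 512 bytes, hence << 9. */
	unsigned long long bytes = array_sectors << 9;
	printf("device size: %llu sectors = %llu bytes\n",
	       array_sectors, bytes);

	/* Read-ahead must cover two whole stripes of data pages. */
	int data_disks = raid_disks - max_degraded;
	int stripe = data_disks * (chunk_size / page_size);
	printf("stripe = %d pages, ra_pages target = %d pages\n",
	       stripe, 2 * stripe);
	return 0;
}

Note the hunk also switches the calculation from conf->previous_raid_disks to conf->raid_disks; the old code had just assigned one to the other a few lines earlier, so the value is the same, but reading the post-reshape disk count directly makes the intent plainer.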
@@ -5098,6 +5105,7 @@ static struct mdk_personality raid6_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
 	.check_reshape	= raid5_check_reshape,
 	.start_reshape  = raid5_start_reshape,
+	.finish_reshape = raid5_finish_reshape,
 #endif
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
@@ -5121,6 +5129,7 @@ static struct mdk_personality raid5_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
 	.check_reshape	= raid5_check_reshape,
 	.start_reshape  = raid5_start_reshape,
+	.finish_reshape = raid5_finish_reshape,
 #endif
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
@@ -5146,6 +5155,7 @@ static struct mdk_personality raid4_personality =
 #ifdef CONFIG_MD_RAID5_RESHAPE
 	.check_reshape	= raid5_check_reshape,
 	.start_reshape	= raid5_start_reshape,
+	.finish_reshape = raid5_finish_reshape,
 #endif
 	.quiesce	= raid5_quiesce,
 };