Commit ccfcc3c1 authored by NeilBrown, committed by Linus Torvalds

[PATCH] md: Core of raid5 resize process

This patch provides the core of the resize/expand process.

sync_request notices if a 'reshape' is happening and acts accordingly.

It allocates new stripe_heads for the next chunk-wide-stripe in the target
geometry, marking them STRIPE_EXPANDING.

Then it finds which stripe heads in the old geometry can provide data needed
by these and marks them STRIPE_EXPAND_SOURCE.  This causes handle_stripe to
read all blocks on those stripes.

Once all blocks on a STRIPE_EXPAND_SOURCE stripe_head are read, any that are
needed are copied into the corresponding STRIPE_EXPANDING stripe_head.  Once a
STRIPE_EXPANDING stripe_head is full, it is marked STRIPE_EXPAND_READY and then
is written out and released.
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 7ecaa1e6
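
Why a reshape must move data at all: RAID5 addresses every chunk by a (device, stripe) pair, and both coordinates depend on the member count, so growing an array re-homes almost every block. Below is a minimal standalone sketch of that mapping, not part of the commit, assuming the left-symmetric layout that md uses by default; the kernel's raid5_compute_sector() is the real implementation and supports other layouts as well.

/* geometry_demo.c: standalone model of raid5 chunk placement,
 * left-symmetric layout only.  Build: cc -o geometry_demo geometry_demo.c
 */
#include <stdio.h>

/* Map a logical chunk number to (data device, per-device stripe) for an
 * N-disk RAID5 array with left-symmetric parity rotation. */
static void map_chunk(long lchunk, int raid_disks, int *dev, long *stripe)
{
	int data_disks = raid_disks - 1;
	long s = lchunk / data_disks;
	int within = lchunk % data_disks;
	int pd_idx = data_disks - s % raid_disks;	/* parity device */

	*dev = (pd_idx + 1 + within) % raid_disks;	/* data device */
	*stripe = s;
}

int main(void)
{
	long c, s_old, s_new;
	int d_old, d_new;

	for (c = 0; c < 8; c++) {
		map_chunk(c, 4, &d_old, &s_old);	/* old geometry */
		map_chunk(c, 5, &d_new, &s_new);	/* new geometry */
		printf("chunk %ld: 4 disks -> dev %d/stripe %ld,"
		       " 5 disks -> dev %d/stripe %ld\n",
		       c, d_old, s_old, d_new, s_new);
	}
	return 0;
}

For example, chunk 3 lands on device 3/stripe 1 with four disks but device 3/stripe 0 with five: the rest of this commit streams such blocks, one chunk-wide stripe at a time, from their old locations to their new ones.
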
drivers/md/md.c

@@ -2165,7 +2165,9 @@ action_show(mddev_t *mddev, char *page)
 	char *type = "idle";
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
-		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+			type = "reshape";
+		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 				type = "resync";
 			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
@@ -4088,8 +4090,10 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
 		seq_printf(seq, "] ");
 	}
 	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
+		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
+		    "reshape" :
 		   (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
-		    "resync" : "recovery"),
+		    "resync" : "recovery")),
 		   per_milli/10, per_milli % 10,
 		   (unsigned long long) resync,
 		   (unsigned long long) max_blocks);
@@ -4543,7 +4547,9 @@ static void md_do_sync(mddev_t *mddev)
 		 */
 		max_sectors = mddev->resync_max_sectors;
 		mddev->resync_mismatches = 0;
-	} else
+	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+		max_sectors = mddev->size << 1;
+	else
 		/* recovery follows the physical size of devices */
 		max_sectors = mddev->size << 1;
@@ -4679,6 +4685,8 @@ static void md_do_sync(mddev_t *mddev)
 	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

 	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
+	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
 	    mddev->curr_resync > 2 &&
 	    mddev->curr_resync >= mddev->recovery_cp) {
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
......
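
Taken together, the md.c hunks above share one rule: MD_RECOVERY_RESHAPE takes precedence over the SYNC family when naming and sizing the operation, and with neither set the thread is doing recovery. A minimal userspace sketch of that precedence follows; the flag constants are local stand-ins for the MD_RECOVERY_* bits rather than the kernel's test_bit() machinery.

/* action_demo.c: standalone model of the action-naming precedence. */
#include <stdio.h>

#define RESHAPE   (1u << 0)
#define SYNC      (1u << 1)
#define REQUESTED (1u << 2)
#define CHECK     (1u << 3)

static const char *action_name(unsigned int recovery)
{
	if (recovery & RESHAPE)			/* new in this patch */
		return "reshape";
	if (recovery & SYNC) {
		if (!(recovery & REQUESTED))
			return "resync";
		return (recovery & CHECK) ? "check" : "repair";
	}
	return "recovery";			/* neither SYNC nor RESHAPE */
}

int main(void)
{
	printf("%s\n", action_name(RESHAPE | SYNC));		/* reshape wins */
	printf("%s\n", action_name(SYNC));			/* resync */
	printf("%s\n", action_name(SYNC | REQUESTED | CHECK));	/* check */
	printf("%s\n", action_name(0));				/* recovery */
	return 0;
}
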
drivers/md/raid5.c

@@ -93,11 +93,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 			if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 				md_wakeup_thread(conf->mddev->thread);
 		}
-		list_add_tail(&sh->lru, &conf->inactive_list);
 		atomic_dec(&conf->active_stripes);
-		if (!conf->inactive_blocked ||
-		    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
+		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+			list_add_tail(&sh->lru, &conf->inactive_list);
 			wake_up(&conf->wait_for_stripe);
+		}
 		}
 	}
 }
@@ -273,9 +273,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, int pd_idx, int noblock)
 		} else {
 			if (!test_bit(STRIPE_HANDLE, &sh->state))
 				atomic_inc(&conf->active_stripes);
-			if (list_empty(&sh->lru))
-				BUG();
-			list_del_init(&sh->lru);
+			if (!list_empty(&sh->lru))
+				list_del_init(&sh->lru);
 		}
 	}
 	} while (sh == NULL);
@@ -1035,6 +1034,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
 	return 0;
 }

+static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
+{
+	int sectors_per_chunk = conf->chunk_size >> 9;
+	sector_t x = stripe;
+	int pd_idx, dd_idx;
+	int chunk_offset = sector_div(x, sectors_per_chunk);
+	stripe = x;
+	raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
+			     + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
+	return pd_idx;
+}
+
 /*
  * handle_stripe - do things to a stripe.
@@ -1061,7 +1072,7 @@ static void handle_stripe(struct stripe_head *sh)
 	struct bio *return_bi= NULL;
 	struct bio *bi;
 	int i;
-	int syncing;
+	int syncing, expanding, expanded;
 	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
 	int non_overwrite = 0;
 	int failed_num=0;

@@ -1076,6 +1087,8 @@ static void handle_stripe(struct stripe_head *sh)
 	clear_bit(STRIPE_DELAYED, &sh->state);

 	syncing = test_bit(STRIPE_SYNCING, &sh->state);
+	expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+	expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
 	/* Now to look around and see what can be done */

 	rcu_read_lock();
@@ -1268,13 +1281,14 @@ static void handle_stripe(struct stripe_head *sh)
 	 * parity, or to satisfy requests
 	 * or to load a block that is being partially written.
 	 */
-	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
+	if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
 		for (i=disks; i--;) {
 			dev = &sh->dev[i];
 			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
 			    (dev->toread ||
 			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
 			     syncing ||
+			     expanding ||
 			     (failed && (sh->dev[failed_num].toread ||
 					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
 			    )
@@ -1464,13 +1478,76 @@ static void handle_stripe(struct stripe_head *sh)
 				set_bit(R5_Wantwrite, &dev->flags);
 				set_bit(R5_ReWrite, &dev->flags);
 				set_bit(R5_LOCKED, &dev->flags);
+				locked++;
 			} else {
 				/* let's read it back */
 				set_bit(R5_Wantread, &dev->flags);
 				set_bit(R5_LOCKED, &dev->flags);
+				locked++;
 			}
 		}

+	if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+		/* Need to write out all blocks after computing parity */
+		sh->disks = conf->raid_disks;
+		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
+		compute_parity(sh, RECONSTRUCT_WRITE);
+		for (i= conf->raid_disks; i--;) {
+			set_bit(R5_LOCKED, &sh->dev[i].flags);
+			locked++;
+			set_bit(R5_Wantwrite, &sh->dev[i].flags);
+		}
+		clear_bit(STRIPE_EXPANDING, &sh->state);
+	} else if (expanded) {
+		clear_bit(STRIPE_EXPAND_READY, &sh->state);
+		wake_up(&conf->wait_for_overlap);
+		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+	}
+
+	if (expanding && locked == 0) {
+		/* We have read all the blocks in this stripe and now we need to
+		 * copy some of them into a target stripe for expand.
+		 */
+		clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+		for (i=0; i< sh->disks; i++)
+			if (i != sh->pd_idx) {
+				int dd_idx, pd_idx, j;
+				struct stripe_head *sh2;
+				sector_t bn = compute_blocknr(sh, i);
+				sector_t s = raid5_compute_sector(bn, conf->raid_disks,
+								  conf->raid_disks-1,
+								  &dd_idx, &pd_idx, conf);
+				sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
+				if (sh2 == NULL)
+					/* so far only the early blocks of this stripe
+					 * have been requested.  When later blocks
+					 * get requested, we will try again
+					 */
+					continue;
+				if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
+				   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
+					/* must have already done this block */
+					release_stripe(sh2);
+					continue;
+				}
+				memcpy(page_address(sh2->dev[dd_idx].page),
+				       page_address(sh->dev[i].page),
+				       STRIPE_SIZE);
+				set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
+				set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
+				for (j=0; j<conf->raid_disks; j++)
+					if (j != sh2->pd_idx &&
+					    !test_bit(R5_Expanded, &sh2->dev[j].flags))
+						break;
+				if (j == conf->raid_disks) {
+					set_bit(STRIPE_EXPAND_READY, &sh2->state);
+					set_bit(STRIPE_HANDLE, &sh2->state);
+				}
+				release_stripe(sh2);
+			}
+	}
+
 	spin_unlock(&sh->lock);

 	while ((bi=return_bi)) {
@@ -1509,7 +1586,7 @@ static void handle_stripe(struct stripe_head *sh)
 		rcu_read_unlock();

 		if (rdev) {
-			if (syncing)
+			if (syncing || expanding || expanded)
 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

 			bi->bi_bdev = rdev->bdev;
@@ -1757,12 +1834,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
 	struct stripe_head *sh;
-	int sectors_per_chunk = conf->chunk_size >> 9;
-	sector_t x;
-	unsigned long stripe;
-	int chunk_offset;
-	int dd_idx, pd_idx;
-	sector_t first_sector;
+	int pd_idx;
+	sector_t first_sector, last_sector;
 	int raid_disks = conf->raid_disks;
 	int data_disks = raid_disks-1;
 	sector_t max_sector = mddev->size << 1;
@@ -1781,6 +1854,80 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 		return 0;
 	}

+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+		/* reshaping is quite different to recovery/resync so it is
+		 * handled quite separately ... here.
+		 *
+		 * On each call to sync_request, we gather one chunk worth of
+		 * destination stripes and flag them as expanding.
+		 * Then we find all the source stripes and request reads.
+		 * As the reads complete, handle_stripe will copy the data
+		 * into the destination stripe and release that stripe.
+		 */
+		int i;
+		int dd_idx;
+		for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
+			int j;
+			int skipped = 0;
+			pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
+			sh = get_active_stripe(conf, sector_nr+i,
+					       conf->raid_disks, pd_idx, 0);
+			set_bit(STRIPE_EXPANDING, &sh->state);
+			/* If any of this stripe is beyond the end of the old
+			 * array, then we need to zero those blocks
+			 */
+			for (j=sh->disks; j--;) {
+				sector_t s;
+				if (j == sh->pd_idx)
+					continue;
+				s = compute_blocknr(sh, j);
+				if (s < (mddev->array_size<<1)) {
+					skipped = 1;
+					continue;
+				}
+				memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+				set_bit(R5_Expanded, &sh->dev[j].flags);
+				set_bit(R5_UPTODATE, &sh->dev[j].flags);
+			}
+			if (!skipped) {
+				set_bit(STRIPE_EXPAND_READY, &sh->state);
+				set_bit(STRIPE_HANDLE, &sh->state);
+			}
+			release_stripe(sh);
+		}
+		spin_lock_irq(&conf->device_lock);
+		conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+		spin_unlock_irq(&conf->device_lock);
+		/* Ok, those stripe are ready. We can start scheduling
+		 * reads on the source stripes.
+		 * The source stripes are determined by mapping the first and last
+		 * block on the destination stripes.
+		 */
+		raid_disks = conf->previous_raid_disks;
+		data_disks = raid_disks - 1;
+		first_sector =
+			raid5_compute_sector(sector_nr*(conf->raid_disks-1),
+					     raid_disks, data_disks,
+					     &dd_idx, &pd_idx, conf);
+		last_sector =
+			raid5_compute_sector((sector_nr+conf->chunk_size/512)
+					     *(conf->raid_disks-1) -1,
+					     raid_disks, data_disks,
+					     &dd_idx, &pd_idx, conf);
+		if (last_sector >= (mddev->size<<1))
+			last_sector = (mddev->size<<1)-1;
+		while (first_sector <= last_sector) {
+			pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
+			sh = get_active_stripe(conf, first_sector,
+					       conf->previous_raid_disks, pd_idx, 0);
+			set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+			set_bit(STRIPE_HANDLE, &sh->state);
+			release_stripe(sh);
+			first_sector += STRIPE_SECTORS;
+		}
+		return conf->chunk_size>>9;
+	}
+
 	/* if there is 1 or more failed drives and we are trying
 	 * to resync, then assert that we are finished, because there is
 	 * nothing we can do.
@@ -1799,13 +1946,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
 	}

-	x = sector_nr;
-	chunk_offset = sector_div(x, sectors_per_chunk);
-	stripe = x;
-	BUG_ON(x != stripe);
-	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
-		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
+	pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
 	sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
 	if (sh == NULL) {
 		sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
......
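
The arithmetic in the reshape branch above is worth restating: sector_nr advances in per-device sectors of the new layout, so one chunk of destination stripes holds array addresses sector_nr*(raid_disks-1) through (sector_nr+chunk_sectors)*(raid_disks-1)-1, and mapping that span back through the old geometry bounds the source reads. A minimal sketch with illustrative numbers follows; the 4-to-5-disk grow and 64KiB chunk size are assumptions for the example, and the kernel does the exact mapping with raid5_compute_sector().

/* span_demo.c: rough model of the source/destination spans used by
 * sync_request() during reshape.  Numbers are illustrative only. */
#include <stdio.h>

int main(void)
{
	long chunk_sectors = 128;	/* 64KiB chunk, 512-byte sectors */
	int new_disks = 5, old_disks = 4;
	long sector_nr = 1024;		/* per-device progress, new layout */

	/* array addresses covered by one chunk of destination stripes */
	long first = sector_nr * (new_disks - 1);
	long last = (sector_nr + chunk_sectors) * (new_disks - 1) - 1;

	/* the same addresses live near these per-device offsets in the
	 * old layout; raid5_compute_sector() computes them exactly */
	long src_first = first / (old_disks - 1);
	long src_last = last / (old_disks - 1);

	printf("destination chunk at %ld covers array sectors %ld..%ld\n",
	       sector_nr, first, last);
	printf("source stripes span roughly device sectors %ld..%ld\n",
	       src_first, src_last);
	return 0;
}

The same accounting explains why the branch returns conf->chunk_size>>9: progress is measured in per-device sectors of the new geometry, one destination chunk per call.
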
include/linux/raid/md_k.h

@@ -157,6 +157,9 @@ struct mddev_s
 *              DONE:     thread is done and is waiting to be reaped
 *              REQUEST:  user-space has requested a sync (used with SYNC)
 *              CHECK:    user-space request for check-only, no repair
+*              RESHAPE:  A reshape is happening
+*
+* If neither SYNC or RESHAPE are set, then it is a recovery.
 */
 #define MD_RECOVERY_RUNNING	0
 #define MD_RECOVERY_SYNC	1

@@ -166,6 +169,7 @@ struct mddev_s
 #define MD_RECOVERY_NEEDED	5
 #define MD_RECOVERY_REQUESTED	6
 #define MD_RECOVERY_CHECK	7
+#define MD_RECOVERY_RESHAPE	8

 	unsigned long			recovery;
 	int				in_sync;	/* know to not need resync */
......
include/linux/raid/raid5.h

@@ -157,6 +157,7 @@ struct stripe_head {
 #define R5_ReadError	8	/* seen a read error here recently */
 #define R5_ReWrite	9	/* have tried to over-write the readerror */

+#define R5_Expanded	10	/* This block now has post-expand data */
 /*
  * Write method
  */

@@ -176,7 +177,8 @@ struct stripe_head {
 #define STRIPE_DEGRADED		7
 #define STRIPE_BIT_DELAY	8
 #define STRIPE_EXPANDING	9
-
+#define STRIPE_EXPAND_SOURCE	10
+#define STRIPE_EXPAND_READY	11
 /*
  * Plugging:
  *
......
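
Read together, the new bits form a small per-stripe state machine: sync_request() marks destination stripes STRIPE_EXPANDING and source stripes STRIPE_EXPAND_SOURCE; handle_stripe() reads the source, copies blocks across (tracking them with R5_Expanded), flips a fully populated destination to STRIPE_EXPAND_READY, then computes parity and writes it out. A toy sketch of that flow, with local constants standing in for the kernel bits and the real transitions spread across sync_request() and handle_stripe() as in the diff above:

/* expand_flow.c: toy walk-through of the reshape stripe states. */
#include <stdio.h>

enum {
	EXPANDING     = 1u << 0,	/* destination, new geometry */
	EXPAND_SOURCE = 1u << 1,	/* source, old geometry */
	EXPAND_READY  = 1u << 2,	/* destination fully populated */
};

int main(void)
{
	unsigned int dst = 0, src = 0;

	dst |= EXPANDING;	/* sync_request: allocate destination */
	src |= EXPAND_SOURCE;	/* sync_request: schedule source reads */

	/* handle_stripe(src): all blocks read, copy them across */
	src &= ~EXPAND_SOURCE;

	/* handle_stripe(dst): last data block arrived */
	dst |= EXPAND_READY;

	if ((dst & EXPAND_READY) && (dst & EXPANDING)) {
		/* compute parity in the new geometry, write out */
		dst &= ~EXPANDING;
	}
	if (dst == EXPAND_READY) {
		/* writes done: account progress, release */
		dst &= ~EXPAND_READY;
		printf("one destination stripe reshaped\n");
	}
	return 0;
}
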