diff --git a/drivers/md/md.c b/drivers/md/md.c
index ec3d8e8a0bd33422545fd15a9d4df4ec12fdf258..e8807ea5377d75c508e8f791df89eac8ec7ffd65 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3564,6 +3564,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
 	char *ptr, *buf = NULL;
 	int err = -ENOMEM;
 
+	md_allow_write(mddev);
+
 	file = kmalloc(sizeof(*file), GFP_KERNEL);
 	if (!file)
 		goto out;
@@ -5032,6 +5034,33 @@ void md_write_end(mddev_t *mddev)
 	}
 }
 
+/* md_allow_write(mddev)
+ * Calling this ensures that the array is marked 'active' so that writes
+ * may proceed without blocking.  It is important to call this before
+ * attempting a GFP_KERNEL allocation while holding the mddev lock.
+ * Must be called with mddev_lock held.
+ */
+void md_allow_write(mddev_t *mddev)
+{
+	if (!mddev->pers)
+		return;
+	if (mddev->ro)
+		return;
+
+	spin_lock_irq(&mddev->write_lock);
+	if (mddev->in_sync) {
+		mddev->in_sync = 0;
+		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+		if (mddev->safemode_delay &&
+		    mddev->safemode == 0)
+			mddev->safemode = 1;
+		spin_unlock_irq(&mddev->write_lock);
+		md_update_sb(mddev, 0);
+	} else
+		spin_unlock_irq(&mddev->write_lock);
+}
+EXPORT_SYMBOL_GPL(md_allow_write);
+
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 
 #define SYNC_MARKS	10
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ab74d40cac98c4c35d7785d1c69eb8ca8c96de30..97ee870b265d866ffa818bdf83dc7f551cd40d7b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2104,6 +2104,8 @@ static int raid1_reshape(mddev_t *mddev)
 		return -EINVAL;
 	}
 
+	md_allow_write(mddev);
+
 	raid_disks = mddev->raid_disks + mddev->delta_disks;
 
 	if (raid_disks < conf->raid_disks) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be008f034ada5e222c18043cf0b766426c86f03c..8a30b297ac3a63e72d549b75c8648b9e8e1326b2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -405,6 +405,8 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 	if (newsize <= conf->pool_size)
 		return 0; /* never bother to shrink */
 
+	md_allow_write(conf->mddev);
+
 	/* Step 1 */
 	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
 			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
@@ -3250,6 +3252,7 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 		else
 			break;
 	}
+	md_allow_write(mddev);
 	while (new > conf->max_nr_stripes) {
 		if (grow_one_stripe(conf))
 			conf->max_nr_stripes++;
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 866a1e2b0ce049fd991f3e85d52bdeea3b82d520..fbaeda79b2e99fc41815760309e7509ebf4408d1 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -94,7 +94,7 @@ extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 			struct page *page, int rw);
 extern void md_do_sync(mddev_t *mddev);
 extern void md_new_event(mddev_t *mddev);
-
+extern void md_allow_write(mddev_t *mddev);
 #endif /* CONFIG_MD */
 
 #endif
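
Illustrative only, not part of the patch: a minimal sketch of the calling pattern md_allow_write() is meant to support, mirroring the get_bitmap_file() change above. The function my_feature_alloc() and its scratch buffer are hypothetical; md_allow_write(), kmalloc()/kfree() and the mddev_lock()/mddev_unlock() helpers used inside md.c are the interfaces assumed here.

/* Hypothetical caller: holds the per-array mddev lock and needs a
 * GFP_KERNEL allocation.  Marking the array 'active' first means that
 * memory reclaim cannot block on a write that would itself need the
 * superblock updated under the lock we are already holding.
 */
static int my_feature_alloc(mddev_t *mddev)
{
	void *buf;
	int err;

	err = mddev_lock(mddev);	/* take the per-array mddev lock */
	if (err)
		return err;

	md_allow_write(mddev);		/* before any GFP_KERNEL allocation under the lock */

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		err = -ENOMEM;

	kfree(buf);			/* kfree(NULL) is a no-op */
	mddev_unlock(mddev);
	return err;
}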