diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 252d55df964296c97960242a016fc7b8573c4279..b65c36d9e240a0f38921b7fd1a0fbc3238debad4 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1530,6 +1530,8 @@ void bitmap_destroy(mddev_t *mddev)
 		return;
 
 	mddev->bitmap = NULL; /* disconnect from the md device */
+	if (mddev->thread)
+		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
 
 	bitmap_free(bitmap);
 }
@@ -1636,6 +1638,8 @@ int bitmap_create(mddev_t *mddev)
 	if (IS_ERR(bitmap->writeback_daemon))
 		return PTR_ERR(bitmap->writeback_daemon);
 
+	mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+
 	return bitmap_update_sb(bitmap);
 
  error:
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b4fb7247b3ed03907b66cc22d1201f88740c9710..ee199d4625208e6c675a92cf2fc752a3d79f3260 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2054,13 +2054,15 @@ static int do_md_run(mddev_t * mddev)
 	if (start_readonly)
 		mddev->ro = 2; /* read-only, but switch on first write */
 
-	/* before we start the array running, initialise the bitmap */
-	err = bitmap_create(mddev);
-	if (err)
-		printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
-			mdname(mddev), err);
-	else
-		err = mddev->pers->run(mddev);
+	err = mddev->pers->run(mddev);
+	if (!err && mddev->pers->sync_request) {
+		err = bitmap_create(mddev);
+		if (err) {
+			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
+			       mdname(mddev), err);
+			mddev->pers->stop(mddev);
+		}
+	}
 	if (err) {
 		printk(KERN_ERR "md: pers->run() failed ...\n");
 		module_put(mddev->pers->owner);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f5204149ab65a0c2517664c5a3e26a390eeae8cc..c618015f07f65c4f045bcfb0bd95303f6a3a3424 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1611,7 +1611,6 @@ static int run(mddev_t *mddev)
 			mdname(mddev));
 		goto out_free_conf;
 	}
-	if (mddev->bitmap)
-		mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
+
 	printk(KERN_INFO
 		"raid1: raid set %s active with %d out of %d mirrors\n",
@@ -1783,13 +1782,6 @@ static void raid1_quiesce(mddev_t *mddev, int state)
 		lower_barrier(conf);
 		break;
 	}
-	if (mddev->thread) {
-		if (mddev->bitmap)
-			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-		else
-			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
-		md_wakeup_thread(mddev->thread);
-	}
 }
 
 static mdk_personality_t raid1_personality =
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 53a0f2ce76c84736baac5ec2c3c583118a8195ec..0d016a844ec69373b7433f914f74daf580fbd81c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1964,9 +1964,6 @@ static int run(mddev_t *mddev)
 	/* Ok, everything is just fine now */
 	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
 
-	if (mddev->bitmap)
-		mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-
 	mddev->queue->unplug_fn = raid5_unplug_device;
 	mddev->queue->issue_flush_fn = raid5_issue_flush;
 
@@ -2200,14 +2197,8 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 		spin_unlock_irq(&conf->device_lock);
 		break;
 	}
-	if (mddev->thread) {
-		if (mddev->bitmap)
-			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-		else
-			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
-		md_wakeup_thread(mddev->thread);
-	}
 }
+
 static mdk_personality_t raid5_personality=
 {
 	.name		= "raid5",
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 9ac6dcd551275544935069a26222a4c98e1c658b..304455d236f97ee677f191c4c67f489d44c3a652 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1990,9 +1990,6 @@ static int run(mddev_t *mddev)
 	/* Ok, everything is just fine now */
 	mddev->array_size = mddev->size * (mddev->raid_disks - 2);
 
-	if (mddev->bitmap)
-		mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-
 	mddev->queue->unplug_fn = raid6_unplug_device;
 	mddev->queue->issue_flush_fn = raid6_issue_flush;
 	return 0;
@@ -2228,14 +2225,8 @@ static void raid6_quiesce(mddev_t *mddev, int state)
 		spin_unlock_irq(&conf->device_lock);
 		break;
 	}
-	if (mddev->thread) {
-		if (mddev->bitmap)
-			mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-		else
-			mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
-		md_wakeup_thread(mddev->thread);
-	}
 }
+
 static mdk_personality_t raid6_personality=
 {
 	.name		= "raid6",
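
For readers outside the kernel tree, the do_md_run() reordering above can be modelled in isolation. The following is a small stand-alone sketch, not kernel code: struct fake_mddev, pers_run(), pers_stop() and make_bitmap() are invented stand-ins for mddev, pers->run(), pers->stop() and bitmap_create(). Only the control flow mirrors the patch: the personality is started first, the bitmap is created afterwards and only when the personality provides sync_request, and a bitmap failure tears the personality back down.

/* Minimal user-space model of the reordered do_md_run() logic.
 * All names here are illustrative stand-ins, not kernel symbols. */
#include <stdio.h>

struct fake_mddev {
	int has_sync_request;   /* stands in for mddev->pers->sync_request */
	int bitmap_should_fail; /* force the bitmap path to fail, for demo */
};

static int pers_run(struct fake_mddev *m)
{
	(void)m;
	printf("pers->run()\n");
	return 0;
}

static void pers_stop(struct fake_mddev *m)
{
	(void)m;
	printf("pers->stop()\n");
}

static int make_bitmap(struct fake_mddev *m)
{
	printf("bitmap_create()\n");
	return m->bitmap_should_fail ? -1 : 0;
}

/* New ordering: start the array first, then attach the bitmap. */
static int do_run(struct fake_mddev *m)
{
	int err = pers_run(m);

	if (!err && m->has_sync_request) {
		err = make_bitmap(m);
		if (err) {
			fprintf(stderr, "failed to create bitmap (%d)\n", err);
			pers_stop(m); /* undo pers->run() on bitmap failure */
		}
	}
	return err;
}

int main(void)
{
	struct fake_mddev ok   = { .has_sync_request = 1, .bitmap_should_fail = 0 };
	struct fake_mddev fail = { .has_sync_request = 1, .bitmap_should_fail = 1 };

	printf("normal start: %d\n", do_run(&ok));
	printf("bitmap failure: %d\n", do_run(&fail));
	return 0;
}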