提交 eea1bf38 编写于 作者: NeilBrown

md: Fix is_mddev_idle test (again).

There are two problems with is_mddev_idle.

1/ sync_io is 'atomic_t' and hence 'int'.  curr_events and all the
   rest are 'long'.
   So if sync_io were to wrap on a 64bit host, the value of
   curr_events would go very negative suddenly, and take a very
   long time to return to positive.

   So do all calculations as 'int'.  That gives us plenty of precision
   for what we need.

2/ To initialise rdev->last_events we simply call is_mddev_idle, on
   the assumption that it will make sure that last_events is in a
   suitable range.  It used to do this, but now it does not.
   So now we need to be more explicit about initialisation.
Signed-off-by: NeilBrown <neilb@suse.de>
上级 99adcd9d
...@@ -5716,18 +5716,18 @@ int unregister_md_personality(struct mdk_personality *p) ...@@ -5716,18 +5716,18 @@ int unregister_md_personality(struct mdk_personality *p)
return 0; return 0;
} }
static int is_mddev_idle(mddev_t *mddev) static int is_mddev_idle(mddev_t *mddev, int init)
{ {
mdk_rdev_t * rdev; mdk_rdev_t * rdev;
int idle; int idle;
long curr_events; int curr_events;
idle = 1; idle = 1;
rcu_read_lock(); rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) { rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = part_stat_read(&disk->part0, sectors[0]) + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
part_stat_read(&disk->part0, sectors[1]) - (int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io); atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats /* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and * as sync_io is counted when a request starts, and
...@@ -5751,7 +5751,7 @@ static int is_mddev_idle(mddev_t *mddev) ...@@ -5751,7 +5751,7 @@ static int is_mddev_idle(mddev_t *mddev)
* always make curr_events less than last_events. * always make curr_events less than last_events.
* *
*/ */
if (curr_events - rdev->last_events > 4096) { if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events; rdev->last_events = curr_events;
idle = 0; idle = 0;
} }
...@@ -5994,7 +5994,7 @@ void md_do_sync(mddev_t *mddev) ...@@ -5994,7 +5994,7 @@ void md_do_sync(mddev_t *mddev)
"(but not more than %d KB/sec) for %s.\n", "(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc); speed_max(mddev), desc);
is_mddev_idle(mddev); /* this also initializes IO event counters */ is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0; io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) { for (m = 0; m < SYNC_MARKS; m++) {
...@@ -6096,7 +6096,7 @@ void md_do_sync(mddev_t *mddev) ...@@ -6096,7 +6096,7 @@ void md_do_sync(mddev_t *mddev)
if (currspeed > speed_min(mddev)) { if (currspeed > speed_min(mddev)) {
if ((currspeed > speed_max(mddev)) || if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev)) { !is_mddev_idle(mddev, 0)) {
msleep(500); msleep(500);
goto repeat; goto repeat;
} }
......
...@@ -51,7 +51,7 @@ struct mdk_rdev_s ...@@ -51,7 +51,7 @@ struct mdk_rdev_s
sector_t size; /* Device size (in blocks) */ sector_t size; /* Device size (in blocks) */
mddev_t *mddev; /* RAID array if running */ mddev_t *mddev; /* RAID array if running */
long last_events; /* IO event timestamp */ int last_events; /* IO event timestamp */
struct block_device *bdev; /* block device handle */ struct block_device *bdev; /* block device handle */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册