-rw-r--r--   drivers/md/md.c             35
-rw-r--r--   include/linux/raid/md_k.h    2
2 files changed, 23 insertions, 14 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3d5a6beb333..bae42331182 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5091,7 +5091,7 @@ static int is_mddev_idle(mddev_t *mddev)
 	mdk_rdev_t * rdev;
 	struct list_head *tmp;
 	int idle;
-	unsigned long curr_events;
+	long curr_events;
 
 	idle = 1;
 	ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5099,20 +5099,29 @@ static int is_mddev_idle(mddev_t *mddev)
 		curr_events = disk_stat_read(disk, sectors[0]) +
 				disk_stat_read(disk, sectors[1]) -
 				atomic_read(&disk->sync_io);
-		/* The difference between curr_events and last_events
-		 * will be affected by any new non-sync IO (making
-		 * curr_events bigger) and any difference in the amount of
-		 * in-flight syncio (making current_events bigger or smaller)
-		 * The amount in-flight is currently limited to
-		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
-		 * which is at most 4096 sectors.
-		 * These numbers are fairly fragile and should be made
-		 * more robust, probably by enforcing the
-		 * 'window size' that md_do_sync sort-of uses.
+		/* sync IO will cause sync_io to increase before the disk_stats
+		 * as sync_io is counted when a request starts, and
+		 * disk_stats is counted when it completes.
+		 * So resync activity will cause curr_events to be smaller than
+		 * when there was no such activity.
+		 * non-sync IO will cause disk_stat to increase without
+		 * increasing sync_io so curr_events will (eventually)
+		 * be larger than it was before.  Once it becomes
+		 * substantially larger, the test below will cause
+		 * the array to appear non-idle, and resync will slow
+		 * down.
+		 * If there is a lot of outstanding resync activity when
+		 * we set last_event to curr_events, then all that activity
+		 * completing might cause the array to appear non-idle
+		 * and resync will be slowed down even though there might
+		 * not have been non-resync activity.  This will only
+		 * happen once though.  'last_events' will soon reflect
+		 * the state where there is little or no outstanding
+		 * resync requests, and further resync activity will
+		 * always make curr_events less than last_events.
 		 *
-		 * Note: the following is an unsigned comparison.
 		 */
-		if ((long)curr_events - (long)rdev->last_events > 4096) {
+		if (curr_events - rdev->last_events > 4096) {
 			rdev->last_events = curr_events;
 			idle = 0;
 		}
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c..28ac632b42d 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -51,7 +51,7 @@ struct mdk_rdev_s
 	sector_t size;		/* Device size (in blocks) */
 	mddev_t *mddev;		/* RAID array if running */
 
-	unsigned long last_events;	/* IO event timestamp */
+	long last_events;	/* IO event timestamp */
 
 	struct block_device *bdev;	/* block device handle */
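The new comment describes a signed-counter heuristic: sync I/O is added to sync_io at submission and to disk_stats at completion, so resync-only traffic keeps curr_events at or below last_events, while ordinary I/O eventually pushes the difference past the 4096-sector slack. The following is a minimal user-space sketch of that arithmetic, not kernel code; the names fake_rdev, sectors_completed and sync_io_submitted are invented stand-ins for the kernel's disk_stats counters and disk->sync_io.

/*
 * Standalone sketch of the idle heuristic above (hypothetical names,
 * compiled in user space for illustration only).
 */
#include <stdio.h>

struct fake_rdev {
	long last_events;
};

/*
 * curr_events = sectors counted at completion
 *             - sync sectors counted at submission.
 * With only resync traffic in flight, submission leads completion,
 * so curr_events stays at or below last_events.  Ordinary I/O raises
 * the completion count without raising the sync count, eventually
 * pushing the difference past the 4096-sector slack.
 */
static int array_is_idle(struct fake_rdev *rdev,
			 long sectors_completed, long sync_io_submitted)
{
	long curr_events = sectors_completed - sync_io_submitted;

	if (curr_events - rdev->last_events > 4096) {
		rdev->last_events = curr_events;
		return 0;	/* non-sync I/O detected: not idle */
	}
	return 1;
}

int main(void)
{
	struct fake_rdev rdev = { .last_events = 0 };

	/* pure resync: 8192 sectors submitted for sync, 4096 completed */
	printf("resync only : %d\n", array_is_idle(&rdev, 4096, 8192));

	/* heavy non-sync I/O: 20000 sectors completed, none were sync */
	printf("non-sync I/O: %d\n", array_is_idle(&rdev, 20000, 0));

	return 0;
}

Run with any C compiler, the first call reports idle (curr_events is negative while resync requests are outstanding) and the second reports non-idle once a burst of non-sync completions arrives, which is why curr_events and last_events are made plain signed longs in this patch.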