From 16ef510139315a2147ee7525796f8dbd4e4b7864 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Sep 2020 08:51:33 +0200 Subject: [PATCH] md: update the optimal I/O size on reshape The raid5 and raid10 drivers currently update the read-ahead size, but not the optimal I/O size on reshape. To prepare for deriving the read-ahead size from the optimal I/O size make sure it is updated as well. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Acked-by: Song Liu Signed-off-by: Jens Axboe --- drivers/md/raid10.c | 22 ++++++++++++++-------- drivers/md/raid5.c | 10 ++++++++-- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e8fa32733917..9956a04ac13b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3703,10 +3703,20 @@ static struct r10conf *setup_conf(struct mddev *mddev) return ERR_PTR(err); } +static void raid10_set_io_opt(struct r10conf *conf) +{ + int raid_disks = conf->geo.raid_disks; + + if (!(conf->geo.raid_disks % conf->geo.near_copies)) + raid_disks /= conf->geo.near_copies; + blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * + raid_disks); +} + static int raid10_run(struct mddev *mddev) { struct r10conf *conf; - int i, disk_idx, chunk_size; + int i, disk_idx; struct raid10_info *disk; struct md_rdev *rdev; sector_t size; @@ -3742,18 +3752,13 @@ static int raid10_run(struct mddev *mddev) mddev->thread = conf->thread; conf->thread = NULL; - chunk_size = mddev->chunk_sectors << 9; if (mddev->queue) { blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, 0); blk_queue_max_write_zeroes_sectors(mddev->queue, 0); - blk_queue_io_min(mddev->queue, chunk_size); - if (conf->geo.raid_disks % conf->geo.near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); - else - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->geo.raid_disks / conf->geo.near_copies)); + blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); + raid10_set_io_opt(conf); } rdev_for_each(rdev, mddev) { @@ -4727,6 +4732,7 @@ static void end_reshape(struct r10conf *conf) stripe /= conf->geo.near_copies; if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; + raid10_set_io_opt(conf); } conf->fullsync = 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 225380efd1e2..9a7d1250894e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7232,6 +7232,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded return 0; } +static void raid5_set_io_opt(struct r5conf *conf) +{ + blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * + (conf->raid_disks - conf->max_degraded)); +} + static int raid5_run(struct mddev *mddev) { struct r5conf *conf; @@ -7521,8 +7527,7 @@ static int raid5_run(struct mddev *mddev) chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->raid_disks - conf->max_degraded)); + raid5_set_io_opt(conf); mddev->queue->limits.raid_partial_stripes_expensive = 1; /* * We can only discard a whole stripe. It doesn't make sense to @@ -8115,6 +8120,7 @@ static void end_reshape(struct r5conf *conf) / PAGE_SIZE); if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; + raid5_set_io_opt(conf); } } } -- 2.11.0