From mboxrd@z Thu Jan 1 00:00:00 1970
From: Christoph Hellwig
To: Jens Axboe
Cc: linux-raid@vger.kernel.org, Hans de Goede, Richard Weinberger,
	linux-kernel@vger.kernel.org, linux-block@vger.kernel.org, Song Liu,
	dm-devel@redhat.com, linux-mtd@lists.infradead.org, cgroups@vger.kernel.org,
	drbd-dev@lists.linbit.com, linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 05/14] md: update the optimal I/O size on reshape
Date: Mon, 20 Jul 2020 09:51:39 +0200
Message-ID: <20200720075148.172156-6-hch@lst.de>
In-Reply-To: <20200720075148.172156-1-hch@lst.de>
References: <20200720075148.172156-1-hch@lst.de>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
List-Id: linux-raid.ids

The raid5 and raid10 drivers currently update the read-ahead size,
but not the optimal I/O size on reshape.  To prepare for deriving the
read-ahead size from the optimal I/O size make sure it is updated as
well.

Signed-off-by: Christoph Hellwig
---
 drivers/md/raid10.c | 22 ++++++++++++++--------
 drivers/md/raid5.c  | 10 ++++++++--
 2 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 353288bc4cb706..552ee0058c118d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3695,10 +3695,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+	int raid_disks = conf->geo.raid_disks;
+
+	if (!(conf->geo.raid_disks % conf->geo.near_copies))
+		raid_disks /= conf->geo.near_copies;
+	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+			 raid_disks);
+}
+
 static int raid10_run(struct mddev *mddev)
 {
 	struct r10conf *conf;
-	int i, disk_idx, chunk_size;
+	int i, disk_idx;
 	struct raid10_info *disk;
 	struct md_rdev *rdev;
 	sector_t size;
@@ -3734,18 +3744,13 @@ static int raid10_run(struct mddev *mddev)
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
-	chunk_size = mddev->chunk_sectors << 9;
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-		blk_queue_io_min(mddev->queue, chunk_size);
-		if (conf->geo.raid_disks % conf->geo.near_copies)
-			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-		else
-			blk_queue_io_opt(mddev->queue, chunk_size *
-					 (conf->geo.raid_disks / conf->geo.near_copies));
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		raid10_set_io_opt(conf);
 	}
 
 	rdev_for_each(rdev, mddev) {
@@ -4719,6 +4724,7 @@ static void end_reshape(struct r10conf *conf)
 		stripe /= conf->geo.near_copies;
 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+		raid10_set_io_opt(conf);
 	}
 	conf->fullsync = 0;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 774ea893d47e21..f571b0eb5ec824 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7123,6 +7123,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
 	return 0;
 }
 
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+	blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+			 (conf->raid_disks - conf->max_degraded));
+}
+
 static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
@@ -7412,8 +7418,7 @@ static int raid5_run(struct mddev *mddev)
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
-		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->raid_disks - conf->max_degraded));
+		raid5_set_io_opt(conf);
 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
 		/*
 		 * We can only discard a whole stripe. It doesn't make sense to
@@ -8006,6 +8011,7 @@ static void end_reshape(struct r5conf *conf)
 						   / PAGE_SIZE);
 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+		raid5_set_io_opt(conf);
 		}
 	}
 }
-- 
2.27.0
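
For reference, the io_opt value programmed by the two new helpers is simply the
chunk size in bytes multiplied by the number of data-carrying disks: raid_disks
minus max_degraded for raid5/raid6, and raid_disks divided by near_copies for
raid10 whenever near_copies divides it evenly.  The stand-alone C sketch below
mirrors that arithmetic outside the kernel; the helper names and example
geometries (4 disks, 512 KiB chunks) are hypothetical and only meant to show
the numbers involved, they are not part of the patch.

#include <stdio.h>

/* Mirrors raid5_set_io_opt(): optimal I/O = chunk bytes * data disks. */
static unsigned long raid5_io_opt(unsigned int chunk_sectors,
				  int raid_disks, int max_degraded)
{
	return ((unsigned long)chunk_sectors << 9) * (raid_disks - max_degraded);
}

/* Mirrors raid10_set_io_opt(): divide by near_copies only when it divides evenly. */
static unsigned long raid10_io_opt(unsigned int chunk_sectors,
				   int raid_disks, int near_copies)
{
	if (!(raid_disks % near_copies))
		raid_disks /= near_copies;
	return ((unsigned long)chunk_sectors << 9) * raid_disks;
}

int main(void)
{
	/* Hypothetical geometries: 1024 sectors = 512 KiB chunks. */
	printf("raid5, 4 disks, 1 parity:  io_opt = %lu bytes\n",
	       raid5_io_opt(1024, 4, 1));
	printf("raid10, 4 disks, near=2:   io_opt = %lu bytes\n",
	       raid10_io_opt(1024, 4, 2));
	return 0;
}

With these example numbers the raid5 array reports 3 * 512 KiB = 1572864 bytes
and the raid10 array reports 2 * 512 KiB = 1048576 bytes, which is the same
value blk_queue_io_opt() is handed in the patch for equivalent kernel
geometries, both at array start and now again after a reshape.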