From: Shaohua Li <shli@kernel.org>
To: linux-raid@vger.kernel.org
Cc: neilb@suse.de, axboe@kernel.dk
Subject: [patch 1/3 v3] raid1: make sequential read detection per disk based
Date: Mon, 02 Jul 2012 09:08:41 +0800 [thread overview]
Message-ID: <20120702011025.079670656@kernel.org> (raw)
In-Reply-To: 20120702010840.197370335@kernel.org
[-- Attachment #1: raid1-seq-detection.patch --]
[-- Type: text/plain, Size: 4261 bytes --]
Currently the sequential read detection is global. It's natural to make it
per-disk based, which can improve the detection for concurrent multiple
sequential reads. And the next patch will make SSD read balance not use a
distance-based algorithm, where this change helps detect truly sequential
reads for SSD.
Signed-off-by: Shaohua Li <shli@fusionio.com>
---
drivers/md/raid1.c | 29 ++++++-----------------------
drivers/md/raid1.h | 11 +++++------
2 files changed, 11 insertions(+), 29 deletions(-)
Index: linux/drivers/md/raid1.c
===================================================================
--- linux.orig/drivers/md/raid1.c 2012-06-28 10:44:47.550666575 +0800
+++ linux/drivers/md/raid1.c 2012-06-28 12:01:03.513137377 +0800
@@ -483,7 +483,6 @@ static int read_balance(struct r1conf *c
const sector_t this_sector = r1_bio->sector;
int sectors;
int best_good_sectors;
- int start_disk;
int best_disk;
int i;
sector_t best_dist;
@@ -503,20 +502,17 @@ static int read_balance(struct r1conf *c
best_good_sectors = 0;
if (conf->mddev->recovery_cp < MaxSector &&
- (this_sector + sectors >= conf->next_resync)) {
+ (this_sector + sectors >= conf->next_resync))
choose_first = 1;
- start_disk = 0;
- } else {
+ else
choose_first = 0;
- start_disk = conf->last_used;
- }
for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
sector_t dist;
sector_t first_bad;
int bad_sectors;
- int disk = start_disk + i;
+ int disk = i;
if (disk >= conf->raid_disks)
disk -= conf->raid_disks;
@@ -580,7 +576,7 @@ static int read_balance(struct r1conf *c
dist = abs(this_sector - conf->mirrors[disk].head_position);
if (choose_first
/* Don't change to another disk for sequential reads */
- || conf->next_seq_sect == this_sector
+ || conf->mirrors[disk].next_seq_sect == this_sector
|| dist == 0
/* If device is idle, use it */
|| atomic_read(&rdev->nr_pending) == 0) {
@@ -606,8 +602,7 @@ static int read_balance(struct r1conf *c
goto retry;
}
sectors = best_good_sectors;
- conf->next_seq_sect = this_sector + sectors;
- conf->last_used = best_disk;
+ conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
}
rcu_read_unlock();
*max_sectors = sectors;
@@ -2581,7 +2576,6 @@ static struct r1conf *setup_conf(struct
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
- conf->last_used = -1;
for (i = 0; i < conf->raid_disks * 2; i++) {
disk = conf->mirrors + i;
@@ -2607,19 +2601,9 @@ static struct r1conf *setup_conf(struct
if (disk->rdev &&
(disk->rdev->saved_raid_disk < 0))
conf->fullsync = 1;
- } else if (conf->last_used < 0)
- /*
- * The first working device is used as a
- * starting point to read balancing.
- */
- conf->last_used = i;
+ }
}
- if (conf->last_used < 0) {
- printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
- mdname(mddev));
- goto abort;
- }
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, NULL);
if (!conf->thread) {
@@ -2876,7 +2860,6 @@ static int raid1_reshape(struct mddev *m
conf->raid_disks = mddev->raid_disks = raid_disks;
mddev->delta_disks = 0;
- conf->last_used = 0; /* just make sure it is in-range */
lower_barrier(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Index: linux/drivers/md/raid1.h
===================================================================
--- linux.orig/drivers/md/raid1.h 2012-06-13 16:18:20.000000000 +0800
+++ linux/drivers/md/raid1.h 2012-06-28 11:55:37.757235867 +0800
@@ -4,6 +4,11 @@
struct mirror_info {
struct md_rdev *rdev;
sector_t head_position;
+
+ /* When choose the best device for a read (read_balance())
+ * we try to keep sequential reads one the same device
+ */
+ sector_t next_seq_sect;
};
/*
@@ -29,12 +34,6 @@ struct r1conf {
*/
int raid_disks;
- /* When choose the best device for a read (read_balance())
- * we try to keep sequential reads one the same device
- * using 'last_used' and 'next_seq_sect'
- */
- int last_used;
- sector_t next_seq_sect;
/* During resync, read_balancing is only allowed on the part
* of the array that has been resynced. 'next_resync' tells us
* where that is.
next prev parent reply other threads:[~2012-07-02 1:08 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-07-02 1:08 [patch 0/3 v3] Optimize raid1 read balance for SSD Shaohua Li
2012-07-02 1:08 ` Shaohua Li [this message]
2012-07-04 5:38 ` [patch 1/3 v3] raid1: make sequential read detection per disk based NeilBrown
2012-07-02 1:08 ` [patch 2/3 v3] raid1: read balance chooses idlest disk for SSD Shaohua Li
2012-07-02 2:13 ` Roberto Spadim
2012-07-02 3:02 ` Shaohua Li
2012-07-02 3:57 ` Roberto Spadim
2012-07-02 4:33 ` Roberto Spadim
2012-07-02 4:31 ` Roberto Spadim
2012-07-02 4:36 ` Roberto Spadim
2012-07-04 5:45 ` NeilBrown
2012-07-02 1:08 ` [patch 3/3 v3] raid1: prevent merging too large request Shaohua Li
2012-07-04 5:59 ` NeilBrown
2012-07-04 8:01 ` Shaohua Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20120702011025.079670656@kernel.org \
--to=shli@kernel.org \
--cc=axboe@kernel.dk \
--cc=linux-raid@vger.kernel.org \
--cc=neilb@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.