From: Baolin Wang <baolin.wang7@gmail.com>
To: axboe@kernel.dk, paolo.valente@linaro.org,
	adrian.hunter@intel.com, ulf.hansson@linaro.org
Cc: zhang.lyra@gmail.com, orsonzhai@gmail.com, arnd@arndb.de,
	linus.walleij@linaro.org, baolin.wang7@gmail.com,
	linux-mmc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org
Subject: [RFC PATCH 2/8] block: Allow sending a batch of requests from the scheduler to hardware
Date: Fri, 17 Jan 2020 13:24:21 +0800	[thread overview]
Message-ID: <b0c8278946f8501f184e45fecb70a862e9c9b7b2.1579164455.git.baolin.wang7@gmail.com> (raw)
In-Reply-To: <cover.1579164455.git.baolin.wang7@gmail.com>

Some SD/MMC host controllers support packed requests: several requests
can be sent to the controller in one go, which improves performance.
Such host drivers therefore want blk-mq to dispatch a batch of requests
at a time, and they rely on bd.last to learn when the last request of
the batch has arrived, so that requests can be combined as much as
possible.
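
The driver side might then look like the following minimal sketch (a
hypothetical "my_host" driver, not part of this series; locking and
error handling omitted for brevity): requests are collected while
bd->last is false, and the packed command is issued on the last one:

	static blk_status_t my_host_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct my_host *host = hctx->queue->queuedata;

		blk_mq_start_request(bd->rq);

		/* Collect the request into the pending packed command. */
		list_add_tail(&bd->rq->queuelist, &host->packed_list);

		/*
		 * bd->last is only set on the final request of the list
		 * that blk_mq_dispatch_rq_list() hands down, so defer the
		 * actual hardware submission until then.
		 */
		if (bd->last)
			my_host_issue_packed(host);

		return BLK_STS_OK;
	}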

Thus, add a queue setting that lets the block driver tell the scheduler
how many requests may be dispatched in one batch, and change the
schedulers to dispatch more than one request per call when a maximum
batch size has been set.
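
For example, a host controller driver that can pack up to 16 requests
would advertise the limit when setting up its queue (a hypothetical
probe-path snippet; only blk_queue_max_batch_requests() itself is
introduced by this patch):

	/* "host->queue" is a placeholder for the driver's request queue */
	blk_queue_max_batch_requests(host->queue, 16);

The scheduler dispatch hooks below then loop up to
queue_max_batch_requests() times per call, defaulting to a batch of
one when no limit has been set.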

Signed-off-by: Baolin Wang <baolin.wang7@gmail.com>
---
 block/bfq-iosched.c    |   32 +++++++++++++--------
 block/blk-mq.c         |    2 --
 block/blk-settings.c   |   13 +++++++++
 block/kyber-iosched.c  |   74 ++++++++++++++++++++++++++----------------------
 block/mq-deadline.c    |   20 +++++++++----
 include/linux/blkdev.h |    8 ++++++
 6 files changed, 95 insertions(+), 54 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index decabc4..bef1187 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4778,29 +4778,37 @@ static int bfq_dispatch_requests(struct blk_mq_hw_ctx *hctx,
 				 struct list_head *list)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	unsigned int batch_reqs = queue_max_batch_requests(hctx->queue) ? : 1;
 	struct request *rq;
 	struct bfq_queue *in_serv_queue;
 	bool waiting_rq, idle_timer_disabled;
+	int i;
 
-	spin_lock_irq(&bfqd->lock);
+	for (i = 0; i < batch_reqs; i++) {
+		spin_lock_irq(&bfqd->lock);
 
-	in_serv_queue = bfqd->in_service_queue;
-	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+		in_serv_queue = bfqd->in_service_queue;
+		waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
 
-	rq = __bfq_dispatch_request(hctx);
+		rq = __bfq_dispatch_request(hctx);
 
-	idle_timer_disabled =
-		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+		idle_timer_disabled =
+			waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
 
-	spin_unlock_irq(&bfqd->lock);
+		spin_unlock_irq(&bfqd->lock);
 
-	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
-				  idle_timer_disabled);
+		bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+					  idle_timer_disabled);
 
-	if (!rq)
-		return 0;
+		if (!rq) {
+			if (list_empty(list))
+				return 0;
 
-	list_add(&rq->queuelist, list);
+			return 1;
+		}
+
+		list_add(&rq->queuelist, list);
+	}
 
 	return 1;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 323c9cb..e9a6677 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1185,8 +1185,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	if (list_empty(list))
 		return false;
 
-	WARN_ON(!list_is_singular(list) && got_budget);
-
 	/*
 	 * Now process all the entries, sending them to the driver.
 	 */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5f6dcc7..29e3a3c 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -59,6 +59,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->io_opt = 0;
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
+	lim->max_batch_reqs = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -871,6 +872,18 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+/**
+ * blk_queue_max_batch_requests - set max requests for batch processing
+ * @q:	the request queue for the device
+ * @max_batch_requests: maximum number of requests in a batch
+ **/
+void blk_queue_max_batch_requests(struct request_queue *q,
+				  unsigned int max_batch_requests)
+{
+	q->limits.max_batch_reqs = max_batch_requests;
+}
+EXPORT_SYMBOL(blk_queue_max_batch_requests);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 8f58434..3a84a5f 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -801,50 +801,56 @@ static int kyber_dispatch_requests(struct blk_mq_hw_ctx *hctx,
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
 	struct kyber_hctx_data *khd = hctx->sched_data;
+	unsigned int batch_reqs = queue_max_batch_requests(hctx->queue) ? : 1;
 	struct request *rq;
-	int i, ret = 0;
+	int i, j, ret = 0;
 
 	spin_lock(&khd->lock);
 
-	/*
-	 * First, if we are still entitled to batch, try to dispatch a request
-	 * from the batch.
-	 */
-	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
-		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
-		if (rq) {
-			list_add(&rq->queuelist, list);
-			ret = 1;
-			goto out;
+	for (j = 0; j < batch_reqs; j++) {
+		/*
+		 * First, if we are still entitled to batch, try to dispatch a
+		 * request from the batch.
+		 */
+		if (khd->batching < kyber_batch_size[khd->cur_domain]) {
+			rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
+			if (rq) {
+				list_add(&rq->queuelist, list);
+				ret = 1;
+				continue;
+			}
 		}
-	}
-
-	/*
-	 * Either,
-	 * 1. We were no longer entitled to a batch.
-	 * 2. The domain we were batching didn't have any requests.
-	 * 3. The domain we were batching was out of tokens.
-	 *
-	 * Start another batch. Note that this wraps back around to the original
-	 * domain if no other domains have requests or tokens.
-	 */
-	khd->batching = 0;
-	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
-		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
-			khd->cur_domain = 0;
-		else
-			khd->cur_domain++;
 
-		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
-		if (rq) {
-			list_add(&rq->queuelist, list);
-			ret = 1;
-			goto out;
+		/*
+		 * Either,
+		 * 1. We were no longer entitled to a batch.
+		 * 2. The domain we were batching didn't have any requests.
+		 * 3. The domain we were batching was out of tokens.
+		 *
+		 * Start another batch. Note that this wraps back around to the
+		 * original domain if no other domains have requests or tokens.
+		 */
+		khd->batching = 0;
+		for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
+			if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
+				khd->cur_domain = 0;
+			else
+				khd->cur_domain++;
+
+			rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
+			if (rq) {
+				list_add(&rq->queuelist, list);
+				ret = 1;
+				break;
+			}
 		}
 	}
 
-out:
 	spin_unlock(&khd->lock);
+
+	if (list_empty(list))
+		ret = 0;
+
 	return ret;
 }
 
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 9fbffba..4e3d58a 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -382,16 +382,24 @@ static int dd_dispatch_requests(struct blk_mq_hw_ctx *hctx,
 				struct list_head *list)
 {
 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+	unsigned int batch_reqs = queue_max_batch_requests(hctx->queue) ? : 1;
 	struct request *rq;
+	int i;
 
-	spin_lock(&dd->lock);
-	rq = __dd_dispatch_request(dd);
-	spin_unlock(&dd->lock);
+	for (i = 0; i < batch_reqs; i++) {
+		spin_lock(&dd->lock);
+		rq = __dd_dispatch_request(dd);
+		spin_unlock(&dd->lock);
 
-	if (!rq)
-		return 0;
+		if (!rq) {
+			if (list_empty(list))
+				return 0;
 
-	list_add(&rq->queuelist, list);
+			return 1;
+		}
+
+		list_add(&rq->queuelist, list);
+	}
 
 	return 1;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 47eb22a..9043fdb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -337,6 +337,7 @@ struct queue_limits {
 	unsigned int		max_write_zeroes_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
+	unsigned int		max_batch_reqs;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
@@ -1109,6 +1110,8 @@ extern void blk_queue_required_elevator_features(struct request_queue *q,
 						 unsigned int features);
 extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 					      struct device *dev);
+extern void blk_queue_max_batch_requests(struct request_queue *q,
+					 unsigned int max_batch_requests);
 
 /*
  * Number of physical segments as sent to the device.
@@ -1291,6 +1294,11 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
 	return q->limits.max_segment_size;
 }
 
+static inline unsigned int queue_max_batch_requests(const struct request_queue *q)
+{
+	return q->limits.max_batch_reqs;
+}
+
 static inline unsigned short queue_logical_block_size(const struct request_queue *q)
 {
 	int retval = 512;
-- 
1.7.9.5


Thread overview: 10+ messages
2020-01-17  5:24 [RFC PATCH 0/8] Add MMC packed request support Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 1/8] block: Change the dispatch_request() API to support batch requests Baolin Wang
2020-02-10  9:21   ` Baolin Wang
2020-01-17  5:24 ` Baolin Wang [this message]
2020-01-17  5:24 ` [RFC PATCH 3/8] mmc: Add MMC packed request support for MMC software queue Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 4/8] mmc: host: sdhci: Introduce ADMA3 transfer mode Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 5/8] mmc: host: sdhci: Factor out the command configuration Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 6/8] mmc: host: sdhci: Remove redundant sg_count member of struct sdhci_host Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 7/8] mmc: host: sdhci: Add MMC packed request support Baolin Wang
2020-01-17  5:24 ` [RFC PATCH 8/8] mmc: host: sdhci-sprd: " Baolin Wang
