From: Wang You <wangyoua@uniontech.com>
To: axboe@kernel.dk, bvanassche@acm.org
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
	hch@lst.de, jaegeuk@kernel.org, fio@vger.kernel.org,
	ming.lei@redhat.com, wangyoua@uniontech.com,
	wangxiaohua@uniontech.com
Subject: [PATCH v2 1/2] block: Introduce nr_sched_batch sys interface
Date: Fri, 22 Jul 2022 17:51:19 +0800
Message-ID: <20220722095120.371212-2-wangyoua@uniontech.com>
In-Reply-To: <20220722095120.371212-1-wangyoua@uniontech.com>

Add an nr_sched_batch interface under /sys/block/<dev>/queue/ that controls
how many requests the I/O scheduler may dispatch in a single batch. The
default value is nr_requests, and nr_sched_batch keeps following nr_requests
until it is changed explicitly; it is also clamped down whenever a smaller
nr_requests is written.
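
For illustration only (not part of the patch), a minimal userspace sketch of
how the new attribute could be exercised; the device name "sda" and the
value 16 are placeholder assumptions, only the sysfs path and the valid
range come from this patch:

  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
  	char buf[32];
  	/* Path added by this patch; "sda" is just an example device. */
  	FILE *f = fopen("/sys/block/sda/queue/nr_sched_batch", "r+");

  	if (!f) {
  		perror("nr_sched_batch");
  		return EXIT_FAILURE;
  	}

  	/* Read the current batch size (defaults to nr_requests). */
  	if (fgets(buf, sizeof(buf), f))
  		printf("current batch size: %s", buf);

  	/* Writes must stay within [1, nr_requests], else -EINVAL. */
  	rewind(f);
  	fprintf(f, "16\n");

  	fclose(f);
  	return 0;
  }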

Signed-off-by: Wang You <wangyoua@uniontech.com>
---
 block/blk-mq-sched.c   |  4 +++-
 block/blk-sysfs.c      | 32 ++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |  1 +
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index a4f7c101b53b..92798a0c03bd 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -100,7 +100,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 	if (hctx->dispatch_busy)
 		max_dispatch = 1;
 	else
-		max_dispatch = hctx->queue->nr_requests;
+		max_dispatch = q->nr_sched_batch;
 
 	do {
 		struct request *rq;
@@ -567,6 +567,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 		q->elevator = NULL;
 		q->nr_requests = q->tag_set->queue_depth;
+		q->nr_sched_batch = q->nr_requests;
 		return 0;
 	}
 
@@ -577,6 +578,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	 */
 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
 				   BLKDEV_DEFAULT_RQ);
+	q->nr_sched_batch = q->nr_requests;
 
 	if (blk_mq_is_shared_tags(flags)) {
 		ret = blk_mq_init_sched_shared_tags(q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9b905e9443e4..34fbfb22e3e1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -70,6 +70,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long nr;
 	int ret, err;
+	unsigned long prev_nr_request = q->nr_requests;
 
 	if (!queue_is_mq(q))
 		return -EINVAL;
@@ -85,6 +86,35 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	if (err)
 		return err;
 
+	if (q->nr_sched_batch == prev_nr_request || q->nr_sched_batch > nr)
+		q->nr_sched_batch = nr;
+
+	return ret;
+}
+
+static ssize_t elv_nr_batch_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->nr_sched_batch, page);
+}
+
+static ssize_t elv_nr_batch_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	unsigned long nr;
+	int ret;
+
+	if (!queue_is_mq(q))
+		return -EINVAL;
+
+	ret = queue_var_store(&nr, page, count);
+	if (ret < 0)
+		return ret;
+
+	if (nr > q->nr_requests || nr < 1)
+		return -EINVAL;
+
+	q->nr_sched_batch = nr;
+
 	return ret;
 }
 
@@ -573,6 +603,7 @@ QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
 QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
 QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+QUEUE_RW_ENTRY(elv_nr_batch, "nr_sched_batch");
 
 QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
 QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
@@ -632,6 +663,7 @@ static struct attribute *queue_attrs[] = {
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&elv_iosched_entry.attr,
+	&elv_nr_batch_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2f7b43444c5f..e38eba4161a3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -422,6 +422,7 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned long		nr_requests;	/* Max # of requests */
+	unsigned long		nr_sched_batch;
 
 	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
-- 
2.27.0




Thread overview: 8+ messages
2022-07-22  9:51 [PATCH v2 0/2] Improve mq-deadline performance in HDD Wang You
2022-07-22  9:51 ` Wang You [this message]
2022-07-22 17:35   ` [PATCH v2 1/2] block: Introduce nr_sched_batch sys interface Bart Van Assche
2022-07-23 10:04     ` Wang You
2022-07-22  9:51 ` [PATCH v2 2/2] block/mq-deadline: Prioritize first request Wang You
2022-07-22 17:47   ` Bart Van Assche
2022-07-23 10:59     ` Wang You
2022-07-25  1:48       ` Bart Van Assche
