From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>
Cc: Jeffle Xu <jefflexu@linux.alibaba.com>,
	Ming Lei <ming.lei@redhat.com>,
	Damien Le Moal <Damien.LeMoal@wdc.com>,
	Keith Busch <kbusch@kernel.org>, Sagi Grimberg <sagi@grimberg.me>,
	"Wunderlich, Mark" <mark.wunderlich@intel.com>,
	"Vasudevan, Anil" <anil.vasudevan@intel.com>,
	linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-nvme@lists.infradead.org
Subject: [PATCH 05/15] blk-mq: factor out a "classic" poll helper
Date: Wed, 12 May 2021 15:15:35 +0200	[thread overview]
Message-ID: <20210512131545.495160-6-hch@lst.de> (raw)
In-Reply-To: <20210512131545.495160-1-hch@lst.de>

Factor the code to do the classic full metal polling out of blk_poll into
a separate blk_mq_poll_classic helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
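A note for readers on the cookie layout that blk_qc_to_hctx and the new
blk_qc_to_rq decode: the submission cookie packs the hardware queue number
into the high bits and the tag into the low bits, with BLK_QC_T_INTERNAL
marking tags that came from the scheduler's tag set.  A minimal sketch of
the encoding side (make_qc_cookie is a hypothetical name used purely for
illustration; in the tree the packing happens at submission time,
request_to_qc_t() as of this series):

	/* Pack a hardware queue number and a tag into a polling cookie. */
	static inline blk_qc_t make_qc_cookie(unsigned int queue_num,
			unsigned int tag, bool internal)
	{
		blk_qc_t qc = (queue_num << BLK_QC_T_SHIFT) | tag;

		/* Scheduler tags index hctx->sched_tags, not hctx->tags. */
		if (internal)
			qc |= BLK_QC_T_INTERNAL;
		return qc;
	}

This is why blk_qc_to_hctx below masks off BLK_QC_T_INTERNAL before
shifting, and why blk_qc_to_rq picks the tag set based on that bit.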
 block/blk-mq.c | 120 +++++++++++++++++++++++--------------------------
 1 file changed, 56 insertions(+), 64 deletions(-)
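The second blk-mq.c hunk below elides the unchanged body of the hybrid
sleep between the renamed function head and the moved tail comment.  For
context, that elided code looks roughly like this (paraphrased from
blk-mq.c as of this series and trimmed for brevity; it is not part of the
diff):

	/* Sleep for about half the mean completion time, then re-check. */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);
	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	kt = nsecs;
	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));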

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 04e17d50fb02..72d5559e4bba 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -69,6 +69,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
 }
 
+static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
+		blk_qc_t qc)
+{
+	if (blk_qc_t_is_internal(qc))
+		return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc));
+	return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc));
+}
+
 /*
  * Check if any of the ctx, dispatch list or elevator
  * have pending work in this hardware queue.
@@ -3796,15 +3804,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	return ret;
 }
 
-static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 {
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
+	struct request *rq = blk_qc_to_rq(hctx, qc);
 	struct hrtimer_sleeper hs;
 	enum hrtimer_mode mode;
 	unsigned int nsecs;
 	ktime_t kt;
 
-	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
+	/*
+	 * If a request has completed on a queue that uses an I/O scheduler,
+	 * we won't get back a request from blk_qc_to_rq.
+	 */
+	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
 		return false;
 
 	/*
@@ -3846,32 +3859,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 
 	__set_current_state(TASK_RUNNING);
 	destroy_hrtimer_on_stack(&hs.timer);
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset the
+	 * state.  Like for the other success return cases, the caller is
+	 * responsible for checking if the IO completed.  If the IO isn't
+	 * complete, we'll get called again and will go straight to the busy
+	 * poll loop.
+	 */
 	return true;
 }
 
-static bool blk_mq_poll_hybrid(struct request_queue *q,
-			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
+		bool spin)
 {
-	struct request *rq;
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
+	long state = current->state;
+	int ret;
 
-	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
-		return false;
+	hctx->poll_considered++;
 
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return false;
-	}
+	do {
+		hctx->poll_invoked++;
 
-	return blk_mq_poll_hybrid_sleep(q, rq);
+		ret = q->mq_ops->poll(hctx);
+		if (ret > 0) {
+			hctx->poll_success++;
+			__set_current_state(TASK_RUNNING);
+			return ret;
+		}
+
+		if (signal_pending_state(state, current))
+			__set_current_state(TASK_RUNNING);
+		if (current->state == TASK_RUNNING)
+			return 1;
+
+		if (ret < 0 || !spin)
+			break;
+		cpu_relax();
+	} while (!need_resched());
+
+	__set_current_state(TASK_RUNNING);
+	return 0;
 }
 
 /**
@@ -3888,9 +3917,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
  */
 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
-	struct blk_mq_hw_ctx *hctx;
-	long state;
-
 	if (!blk_qc_t_valid(cookie) ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -3898,46 +3924,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	hctx = blk_qc_to_hctx(q, cookie);
-
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state. Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed. If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop. If specified not to spin,
-	 * we also should not sleep.
-	 */
-	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
-		return 1;
-
-	hctx->poll_considered++;
-
-	state = current->state;
-	do {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx);
-		if (ret > 0) {
-			hctx->poll_success++;
-			__set_current_state(TASK_RUNNING);
-			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-
-		if (current->state == TASK_RUNNING)
+	/* If specified not to spin, we also should not sleep. */
+	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
-		if (ret < 0 || !spin)
-			break;
-		cpu_relax();
-	} while (!need_resched());
-
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	}
+	return blk_mq_poll_classic(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
 
-- 
2.30.2

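For reference, the consumer side that drives blk_poll is unchanged by this
patch.  A simplified sketch of a typical caller, modeled loosely on
__blkdev_direct_IO_simple() as of this series (setup and error handling
omitted):

	blk_qc_t qc = submit_bio(&bio);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* bio.bi_private is cleared by the completion handler. */
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

A zero return from blk_poll sends the caller to sleep via
blk_io_schedule(); any non-zero return loops straight back into the poll,
which is the contract the comments in blk_mq_poll_hybrid above describe.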

Thread overview: 79+ messages

2021-05-12 13:15 switch block layer polling to a bio based model v3 Christoph Hellwig
2021-05-12 13:15 ` [PATCH 01/15] direct-io: remove blk_poll support Christoph Hellwig
2021-05-12 13:15 ` [PATCH 02/15] block: don't try to poll multi-bio I/Os in __blkdev_direct_IO Christoph Hellwig
2021-05-12 20:54   ` Sagi Grimberg
2021-05-16 18:01   ` Kanchan Joshi
2021-05-12 13:15 ` [PATCH 03/15] iomap: don't try to poll multi-bio I/Os in __iomap_dio_rw Christoph Hellwig
2021-05-12 21:32   ` Darrick J. Wong
2021-05-12 13:15 ` [PATCH 04/15] blk-mq: factor out a blk_qc_to_hctx helper Christoph Hellwig
2021-05-12 20:58   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 05/15] blk-mq: factor out a "classic" poll helper Christoph Hellwig [this message]
2021-05-12 20:59   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 06/15] blk-mq: remove blk_qc_t_to_tag and blk_qc_t_is_internal Christoph Hellwig
2021-05-12 21:42   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 07/15] blk-mq: remove blk_qc_t_valid Christoph Hellwig
2021-05-12 21:43   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 08/15] io_uring: don't sleep when polling for I/O Christoph Hellwig
2021-05-12 21:55   ` Sagi Grimberg
2021-05-19 13:02     ` Christoph Hellwig
2021-05-12 13:15 ` [PATCH 09/15] block: rename REQ_HIPRI to REQ_POLLED Christoph Hellwig
2021-05-12 21:56   ` Sagi Grimberg
2021-05-12 22:43   ` Chaitanya Kulkarni
2021-05-12 13:15 ` [PATCH 10/15] block: use SLAB_TYPESAFE_BY_RCU for the bio slab Christoph Hellwig
2021-05-12 13:15 ` [PATCH 11/15] block: define 'struct bvec_iter' as packed Christoph Hellwig
2021-05-12 21:58   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 12/15] block: switch polling to be bio based Christoph Hellwig
2021-05-12 22:03   ` Sagi Grimberg
2021-05-12 22:12     ` Keith Busch
2021-05-14  2:50       ` Keith Busch
2021-05-14 16:26         ` Keith Busch
2021-05-14 16:28           ` Christoph Hellwig
2021-05-13  1:26     ` Ming Lei
2021-05-13  1:23   ` Ming Lei
2021-05-12 13:15 ` [PATCH 13/15] block: don't allow writing to the poll queue attribute Christoph Hellwig
2021-05-12 22:04   ` Sagi Grimberg
2021-05-12 13:15 ` [PATCH 14/15] nvme-multipath: set QUEUE_FLAG_NOWAIT Christoph Hellwig
2021-05-12 13:15 ` [PATCH 15/15] nvme-multipath: enable polled I/O Christoph Hellwig
2021-05-12 22:10   ` Sagi Grimberg
2021-05-12 22:16     ` Keith Busch
2021-05-13 23:33 ` switch block layer polling to a bio based model v3 Wunderlich, Mark

Earlier version of this series (loose matches on Subject:):
2021-04-27 16:16 switch block layer polling to a bio based model v2 Christoph Hellwig
2021-04-27 16:16 ` [PATCH 05/15] blk-mq: factor out a "classic" poll helper Christoph Hellwig
