From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>
Cc: Jeffle Xu <jefflexu@linux.alibaba.com>,
	Ming Lei <ming.lei@redhat.com>,
	Damien Le Moal <Damien.LeMoal@wdc.com>,
	Keith Busch <kbusch@kernel.org>,
	Sagi Grimberg <sagi@grimberg.me>,
	"Wunderlich, Mark" <mark.wunderlich@intel.com>,
	"Vasudevan, Anil" <anil.vasudevan@intel.com>,
	linux-block@vger.kernel.org,
	linux-fsdevel@vger.kernel.org,
	linux-nvme@lists.infradead.org
Subject: [PATCH 06/16] blk-mq: factor out a "classic" poll helper
Date: Tue, 12 Oct 2021 13:12:16 +0200
Message-ID: <20211012111226.760968-7-hch@lst.de>
In-Reply-To: <20211012111226.760968-1-hch@lst.de>

Factor the code to do the classic full metal polling out of blk_poll
into a separate blk_mq_poll_classic helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
---
 block/blk-mq.c | 120 +++++++++++++++++++++++--------------------------
 1 file changed, 56 insertions(+), 64 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index e1d1f2109bbed..00bc595d8de6d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -71,6 +71,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
 }
 
+static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
+		blk_qc_t qc)
+{
+	if (blk_qc_t_is_internal(qc))
+		return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc));
+	return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc));
+}
+
 /*
  * Check if any of the ctx, dispatch list or elevator
  * have pending work in this hardware queue.
@@ -3944,15 +3952,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	return ret;
 }
 
-static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 {
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
+	struct request *rq = blk_qc_to_rq(hctx, qc);
 	struct hrtimer_sleeper hs;
 	enum hrtimer_mode mode;
 	unsigned int nsecs;
 	ktime_t kt;
 
-	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
+	/*
+	 * If a request has completed on queue that uses an I/O scheduler, we
+	 * won't get back a request from blk_qc_to_rq.
+	 */
+	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
 		return false;
 
 	/*
@@ -3994,32 +4007,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	__set_current_state(TASK_RUNNING);
 	destroy_hrtimer_on_stack(&hs.timer);
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset the
+	 * state.  Like for the other success return cases, the caller is
+	 * responsible for checking if the IO completed.  If the IO isn't
+	 * complete, we'll get called again and will go straight to the busy
+	 * poll loop.
+	 */
 	return true;
 }
 
-static bool blk_mq_poll_hybrid(struct request_queue *q,
-		struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
+		bool spin)
 {
-	struct request *rq;
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
+	long state = get_current_state();
+	int ret;
 
-	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
-		return false;
+	hctx->poll_considered++;
 
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return false;
-	}
+	do {
+		hctx->poll_invoked++;
 
-	return blk_mq_poll_hybrid_sleep(q, rq);
+		ret = q->mq_ops->poll(hctx);
+		if (ret > 0) {
+			hctx->poll_success++;
+			__set_current_state(TASK_RUNNING);
+			return ret;
+		}
+
+		if (signal_pending_state(state, current))
+			__set_current_state(TASK_RUNNING);
+		if (task_is_running(current))
+			return 1;
+
+		if (ret < 0 || !spin)
+			break;
+		cpu_relax();
+	} while (!need_resched());
+
+	__set_current_state(TASK_RUNNING);
+	return 0;
 }
 
 /**
@@ -4036,9 +4065,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
  */
 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned int state;
-
 	if (!blk_qc_t_valid(cookie) ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -4046,46 +4072,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	hctx = blk_qc_to_hctx(q, cookie);
-
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state.  Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed.  If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop.  If specified not to spin,
-	 * we also should not sleep.
-	 */
-	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
-		return 1;
-
-	hctx->poll_considered++;
-
-	state = get_current_state();
-	do {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx);
-		if (ret > 0) {
-			hctx->poll_success++;
-			__set_current_state(TASK_RUNNING);
-			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-
-		if (task_is_running(current))
+	/* If specified not to spin, we also should not sleep. */
+	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
-
-		if (ret < 0 || !spin)
-			break;
-		cpu_relax();
-	} while (!need_resched());
-
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	}
+	return blk_mq_poll_classic(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
-- 
2.30.2
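
[Editorial note: for readers following the refactor, below is a minimal,
self-contained user-space C sketch of the shape blk_poll() takes after this
patch: a hybrid-sleep gate, then the factored-out "classic" busy-poll loop.
All names in it (driver_poll, poll_hybrid, POLL_CLASSIC, the fake completion
counter) are hypothetical stand-ins for illustration, not kernel API, and
sched_yield() is only a rough analogue of the kernel's cpu_relax() and
need_resched() handling.]

/*
 * Hypothetical sketch of the refactored control flow, not kernel code.
 * driver_poll() fakes a device that completes an I/O on the third poll.
 */
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_CLASSIC	(-1)		/* stand-in for BLK_MQ_POLL_CLASSIC */

static int poll_nsec = POLL_CLASSIC;	/* classic mode: skip the hybrid sleep */

/* <0: error, 0: nothing completed yet, >0: number of completions reaped */
static int driver_poll(void)
{
	static int calls;

	return ++calls >= 3 ? 1 : 0;
}

/* Hybrid mode would sleep an estimated duration; this sketch never does. */
static bool poll_hybrid(void)
{
	return false;
}

/* The factored-out classic loop: spin on the driver until something gives. */
static int poll_classic(bool spin)
{
	for (;;) {
		int ret = driver_poll();

		if (ret > 0)
			return ret;	/* completions found */
		if (ret < 0 || !spin)
			break;		/* error, or the caller said not to spin */
		sched_yield();		/* rough stand-in for cpu_relax() */
	}
	return 0;
}

static int poll(bool spin)
{
	/* If specified not to spin, we also should not sleep. */
	if (spin && poll_nsec != POLL_CLASSIC && poll_hybrid())
		return 1;	/* slept; the caller must re-check completion */
	return poll_classic(spin);
}

int main(void)
{
	printf("poll() reaped %d completion(s)\n", poll(true));
	return 0;
}

The return-value contract is the point of the factoring: a positive return
means completions were reaped, 1 means the caller should restart its loop and
re-check whether its own I/O finished, and 0 means polling stopped without
progress. That contract is what lets blk_poll() itself shrink to the
two-branch dispatch seen in the final hunk.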