From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe
Cc: Jeffle Xu, Ming Lei, Damien Le Moal, Keith Busch, Sagi Grimberg,
	"Wunderlich, Mark", "Vasudevan, Anil",
	linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org
Subject: [PATCH 05/15] blk-mq: factor out a "classic" poll helper
Date: Tue, 27 Apr 2021 18:16:09 +0200
Message-Id: <20210427161619.1294399-6-hch@lst.de>
In-Reply-To: <20210427161619.1294399-1-hch@lst.de>
References: <20210427161619.1294399-1-hch@lst.de>

Factor the code to do the classic full metal polling out of blk_poll into
a separate blk_mq_poll_classic helper.
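The net effect is that blk_poll() becomes a thin dispatcher: hybrid
polling stays in blk_mq_poll_hybrid(), busy polling moves to the new
blk_mq_poll_classic(), and both re-derive the hardware context (and, for
the hybrid path, the request) from the poll cookie via
blk_qc_to_hctx()/blk_qc_to_rq() instead of having blk_poll() look them
up.  A condensed sketch of the resulting structure, paraphrasing the
diff below (plug flushing and poll statistics elided):

/* Sketch only -- see the diff below for the real code. */
int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
	if (!blk_qc_t_valid(cookie) ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/* The hybrid sleep only makes sense if the caller may spin. */
	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC &&
	    blk_mq_poll_hybrid(q, cookie))
		return 1;

	/* Everything else uses the classic busy-poll loop. */
	return blk_mq_poll_classic(q, cookie, spin);
}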
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 120 +++++++++++++++++++++++--------------------------
 1 file changed, 56 insertions(+), 64 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 06a43fb5e2c5..ba563ca57bd9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -69,6 +69,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
 }
 
+static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
+		blk_qc_t qc)
+{
+	if (blk_qc_t_is_internal(qc))
+		return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc));
+	return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc));
+}
+
 /*
  * Check if any of the ctx, dispatch list or elevator
  * have pending work in this hardware queue.
@@ -3781,15 +3789,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	return ret;
 }
 
-static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 {
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
+	struct request *rq = blk_qc_to_rq(hctx, qc);
 	struct hrtimer_sleeper hs;
 	enum hrtimer_mode mode;
 	unsigned int nsecs;
 	ktime_t kt;
 
-	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
+	/*
+	 * If a request has completed on queue that uses an I/O scheduler, we
+	 * won't get back a request from blk_qc_to_rq.
+	 */
+	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
 		return false;
 
 	/*
@@ -3831,32 +3844,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	__set_current_state(TASK_RUNNING);
 	destroy_hrtimer_on_stack(&hs.timer);
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset the
+	 * state.  Like for the other success return cases, the caller is
+	 * responsible for checking if the IO completed.  If the IO isn't
+	 * complete, we'll get called again and will go straight to the busy
+	 * poll loop.
+	 */
 	return true;
 }
 
-static bool blk_mq_poll_hybrid(struct request_queue *q,
-			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
+		bool spin)
 {
-	struct request *rq;
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
+	long state = current->state;
+	int ret;
 
-	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
-		return false;
+	hctx->poll_considered++;
 
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return false;
-	}
+	do {
+		hctx->poll_invoked++;
 
-	return blk_mq_poll_hybrid_sleep(q, rq);
+		ret = q->mq_ops->poll(hctx);
+		if (ret > 0) {
+			hctx->poll_success++;
+			__set_current_state(TASK_RUNNING);
+			return ret;
+		}
+
+		if (signal_pending_state(state, current))
+			__set_current_state(TASK_RUNNING);
+		if (current->state == TASK_RUNNING)
+			return 1;
+
+		if (ret < 0 || !spin)
+			break;
+		cpu_relax();
+	} while (!need_resched());
+
+	__set_current_state(TASK_RUNNING);
+	return 0;
 }
 
 /**
@@ -3873,9 +3902,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
  */
 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
-	struct blk_mq_hw_ctx *hctx;
-	long state;
-
 	if (!blk_qc_t_valid(cookie) ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -3883,46 +3909,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	hctx = blk_qc_to_hctx(q, cookie);
-
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state.  Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed.  If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop.  If specified not to spin,
-	 * we also should not sleep.
-	 */
-	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
-		return 1;
-
-	hctx->poll_considered++;
-
-	state = current->state;
-	do {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx);
-		if (ret > 0) {
-			hctx->poll_success++;
-			__set_current_state(TASK_RUNNING);
-			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-
-		if (current->state == TASK_RUNNING)
+	/* If specified not to spin, we also should not sleep. */
+	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+		if (blk_mq_poll_hybrid(q, cookie))
 			return 1;
-
-		if (ret < 0 || !spin)
-			break;
-		cpu_relax();
-	} while (!need_resched());
-
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	}
+	return blk_mq_poll_classic(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
-- 
2.30.1
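For completeness, a hypothetical userspace demo (not part of the patch)
showing how this path gets exercised: preadv2() with RWF_HIPRI on an
O_DIRECT file descriptor is one of the callers that ends up in
blk_poll().  It assumes a block device whose queue advertises poll
support (/sys/block/<dev>/queue/io_poll); with io_poll_delay set to -1
the read busy-polls in blk_mq_poll_classic(), while 0 or a positive
value tries the hybrid sleep first.

/*
 * Hypothetical demo: issue one polled 4k read from a block device.
 * Build with: cc -O2 -o polled-read polled-read.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <block-device>\n", argv[0]);
		return 1;
	}

	/* Only direct I/O completions are polled for. */
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT wants a suitably aligned buffer. */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/*
	 * RWF_HIPRI makes the kernel poll for the completion instead of
	 * sleeping on an interrupt; whether it uses the hybrid sleep or
	 * the classic busy loop depends on the queue's io_poll_delay
	 * setting (q->poll_nsec in the code above).
	 */
	ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes via polled I/O\n", ret);

	free(buf);
	close(fd);
	return 0;
}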