From: Linus Walleij <linus.walleij@linaro.org>
To: linux-mmc@vger.kernel.org, Ulf Hansson <ulf.hansson@linaro.org>
Cc: linux-block@vger.kernel.org, Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@lst.de>, Arnd Bergmann <arnd@arndb.de>,
	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>,
	Paolo Valente <paolo.valente@linaro.org>,
	Avri Altman <Avri.Altman@sandisk.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Linus Walleij <linus.walleij@linaro.org>
Subject: [PATCH 10/12 v5] mmc: queue/block: pass around struct mmc_queue_req*s
Date: Fri, 10 Nov 2017 11:01:41 +0100
Message-ID: <20171110100143.12256-11-linus.walleij@linaro.org>
In-Reply-To: <20171110100143.12256-1-linus.walleij@linaro.org>

Instead of passing around several pointers to mmc_queue_req, request,
and mmc_queue, and reassigning them left and right, pass a single
mmc_queue_req and dereference the queue and request from the
mmc_queue_req where needed.

The struct mmc_queue_req is the thing that has a lifecycle after
all: this is what we keep in our queue, and what the block
layer helps us manage. Augment a bunch of functions to take a
single argument so we can see the trees and not just a big
jungle of arguments.
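
To illustrate the new calling convention, here is a minimal sketch
mirroring the flush hunk below (it relies only on the mq back-pointer
and the mmc_queue_req_to_req() helper already used by this patch): a
function that used to take a (mq, req) pair now takes the
mmc_queue_req alone and derives both where needed:

    /* Before: two arguments that always travel together */
    static void mmc_blk_issue_flush(struct mmc_queue *mq,
                                    struct request *req);

    /* After: one argument, from which queue and request are derived */
    static void mmc_blk_issue_flush(struct mmc_queue_req *mq_rq)
    {
            struct mmc_queue *mq = mq_rq->mq;
            struct request *req = mmc_queue_req_to_req(mq_rq);
            /* ... issue the flush, then end req ... */
    }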

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
ChangeLog v1->v5:
- Rebasing on the "next" branch in the MMC tree.
---
 drivers/mmc/core/block.c | 128 ++++++++++++++++++++++++-----------------------
 drivers/mmc/core/block.h |   5 +-
 drivers/mmc/core/queue.c |   2 +-
 3 files changed, 69 insertions(+), 66 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c7a57006e27f..2cd9fe5a8c9b 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1208,9 +1208,9 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
  * processed it with all other requests and then they get issued in this
  * function.
  */
-static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_drv_op(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_queue_req *mq_rq;
+	struct mmc_queue *mq = mq_rq->mq;
 	struct mmc_card *card = mq->card;
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_blk_ioc_data **idata;
@@ -1220,7 +1220,6 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
 	int ret;
 	int i;
 
-	mq_rq = req_to_mmc_queue_req(req);
 	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
 
 	switch (mq_rq->drv_op) {
@@ -1264,12 +1263,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
 		break;
 	}
 	mq_rq->drv_op_result = ret;
-	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+	blk_end_request_all(mmc_queue_req_to_req(mq_rq),
+			    ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
-static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_discard_rq(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct request *req = mmc_queue_req_to_req(mq_rq);
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_DISCARD;
@@ -1310,10 +1311,10 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	blk_end_request(req, status, blk_rq_bytes(req));
 }
 
-static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
-				       struct request *req)
+static void mmc_blk_issue_secdiscard_rq(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct request *req = mmc_queue_req_to_req(mq_rq);
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_SECDISCARD;
@@ -1380,14 +1381,15 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	blk_end_request(req, status, blk_rq_bytes(req));
 }
 
-static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_flush(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	int ret = 0;
 
 	ret = mmc_flush_cache(card);
-	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+	blk_end_request_all(mmc_queue_req_to_req(mq_rq),
+			    ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 /*
@@ -1698,18 +1700,18 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 		*do_data_tag_p = do_data_tag;
 }
 
-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
-			       struct mmc_card *card,
-			       bool disable_multi,
-			       struct mmc_queue *mq)
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mq_rq,
+			       bool disable_multi)
 {
 	u32 readcmd, writecmd;
-	struct mmc_blk_request *brq = &mqrq->brq;
-	struct request *req = mmc_queue_req_to_req(mqrq);
+	struct mmc_queue *mq = mq_rq->mq;
+	struct mmc_card *card = mq->card;
+	struct mmc_blk_request *brq = &mq_rq->brq;
+	struct request *req = mmc_queue_req_to_req(mq_rq);
 	struct mmc_blk_data *md = mq->blkdata;
 	bool do_rel_wr, do_data_tag;
 
-	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+	mmc_blk_data_prep(mq, mq_rq, disable_multi, &do_rel_wr, &do_data_tag);
 
 	brq->mrq.cmd = &brq->cmd;
 	brq->mrq.areq = NULL;
@@ -1764,9 +1766,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		brq->mrq.sbc = &brq->sbc;
 	}
 
-	mqrq->areq.err_check = mmc_blk_err_check;
-	mqrq->areq.host = card->host;
-	INIT_WORK(&mqrq->areq.finalization_work, mmc_finalize_areq);
+	mq_rq->areq.err_check = mmc_blk_err_check;
+	mq_rq->areq.host = card->host;
+	INIT_WORK(&mq_rq->areq.finalization_work, mmc_finalize_areq);
 }
 
 static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
@@ -1798,10 +1800,12 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 	return req_pending;
 }
 
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
-				 struct request *req,
-				 struct mmc_queue_req *mqrq)
+static void mmc_blk_rw_cmd_abort(struct mmc_queue_req *mq_rq)
 {
+	struct mmc_queue *mq = mq_rq->mq;
+	struct mmc_card *card = mq->card;
+	struct request *req = mmc_queue_req_to_req(mq_rq);
+
 	if (mmc_card_removed(card))
 		req->rq_flags |= RQF_QUIET;
 	while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
@@ -1809,16 +1813,15 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
 
 /**
  * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @mqrq: the mmc_queue_request containing the areq to be restarted
+ * @mq_rq: the mmc_queue_request containing the areq to be restarted
  */
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq,
-				   struct mmc_queue_req *mqrq)
+static void mmc_blk_rw_try_restart(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_async_req *areq = &mqrq->areq;
+	struct mmc_async_req *areq = &mq_rq->areq;
+	struct mmc_queue *mq = mq_rq->mq;
 
 	/* Proceed and try to restart the current async request */
-	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mmc_blk_rw_rq_prep(mq_rq, 0);
 	areq->disable_multi = false;
 	areq->retry = 0;
 	mmc_restart_areq(mq->card->host, areq);
@@ -1867,7 +1870,7 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 			pr_err("%s BUG rq_tot %d d_xfer %d\n",
 			       __func__, blk_rq_bytes(old_req),
 			       brq->data.bytes_xfered);
-			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
+			mmc_blk_rw_cmd_abort(mq_rq);
 			return;
 		}
 		break;
@@ -1875,12 +1878,12 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 		req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
 		if (mmc_blk_reset(md, card->host, type)) {
 			if (req_pending)
-				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
-			mmc_blk_rw_try_restart(mq, mq_rq);
+				mmc_blk_rw_cmd_abort(mq_rq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		if (!req_pending) {
-			mmc_blk_rw_try_restart(mq, mq_rq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		break;
@@ -1892,8 +1895,8 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 	case MMC_BLK_ABORT:
 		if (!mmc_blk_reset(md, card->host, type))
 			break;
-		mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
-		mmc_blk_rw_try_restart(mq, mq_rq);
+		mmc_blk_rw_cmd_abort(mq_rq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	case MMC_BLK_DATA_ERR: {
 		int err;
@@ -1901,8 +1904,8 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 		if (!err)
 			break;
 		if (err == -ENODEV) {
-			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
-			mmc_blk_rw_try_restart(mq, mq_rq);
+			mmc_blk_rw_cmd_abort(mq_rq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		/* Fall through */
@@ -1923,19 +1926,19 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 		req_pending = blk_end_request(old_req, BLK_STS_IOERR,
 					      brq->data.blksz);
 		if (!req_pending) {
-			mmc_blk_rw_try_restart(mq, mq_rq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		break;
 	case MMC_BLK_NOMEDIUM:
-		mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
-		mmc_blk_rw_try_restart(mq, mq_rq);
+		mmc_blk_rw_cmd_abort(mq_rq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	default:
 		pr_err("%s: Unhandled return value (%d)",
 				old_req->rq_disk->disk_name, status);
-		mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
-		mmc_blk_rw_try_restart(mq, mq_rq);
+		mmc_blk_rw_cmd_abort(mq_rq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	}
 
@@ -1944,25 +1947,25 @@ static void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status stat
 		 * In case of a incomplete request
 		 * prepare it again and resend.
 		 */
-		mmc_blk_rw_rq_prep(mq_rq, card,
-				areq->disable_multi, mq);
+		mmc_blk_rw_rq_prep(mq_rq, areq->disable_multi);
 		mmc_start_areq(card->host, areq);
 		mq_rq->brq.retune_retry_done = retune_retry_done;
 	}
 }
 
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static void mmc_blk_issue_rw_rq(struct mmc_queue_req *mq_rq)
 {
+	struct request *req = mmc_queue_req_to_req(mq_rq);
+	struct mmc_queue *mq = mq_rq->mq;
 	struct mmc_card *card = mq->card;
-	struct mmc_queue_req *mqrq_cur = req_to_mmc_queue_req(new_req);
-	struct mmc_async_req *areq = &mqrq_cur->areq;
+	struct mmc_async_req *areq = &mq_rq->areq;
 
 	/*
 	 * If the card was removed, just cancel everything and return.
 	 */
 	if (mmc_card_removed(card)) {
-		new_req->rq_flags |= RQF_QUIET;
-		blk_end_request_all(new_req, BLK_STS_IOERR);
+		req->rq_flags |= RQF_QUIET;
+		blk_end_request_all(req, BLK_STS_IOERR);
 		return;
 	}
 
@@ -1971,24 +1974,25 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	 * multiple read or write is allowed
 	 */
 	if (mmc_large_sector(card) &&
-	    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
+	    !IS_ALIGNED(blk_rq_sectors(req), 8)) {
 		pr_err("%s: Transfer size is not 4KB sector size aligned\n",
-		       new_req->rq_disk->disk_name);
-		mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
+		       req->rq_disk->disk_name);
+		mmc_blk_rw_cmd_abort(mq_rq);
 		return;
 	}
 
-	mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
+	mmc_blk_rw_rq_prep(mq_rq, 0);
 	areq->disable_multi = false;
 	areq->retry = 0;
 	areq->report_done_status = mmc_blk_rw_done;
 	mmc_start_areq(card->host, areq);
 }
 
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+void mmc_blk_issue_rq(struct mmc_queue_req *mq_rq)
 {
 	int ret;
-	struct mmc_blk_data *md = mq->blkdata;
+	struct request *req = mmc_queue_req_to_req(mq_rq);
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 
 	if (!req) {
@@ -2010,7 +2014,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * ioctl()s
 		 */
 		mmc_wait_for_areq(card->host);
-		mmc_blk_issue_drv_op(mq, req);
+		mmc_blk_issue_drv_op(mq_rq);
 		break;
 	case REQ_OP_DISCARD:
 		/*
@@ -2018,7 +2022,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * discard.
 		 */
 		mmc_wait_for_areq(card->host);
-		mmc_blk_issue_discard_rq(mq, req);
+		mmc_blk_issue_discard_rq(mq_rq);
 		break;
 	case REQ_OP_SECURE_ERASE:
 		/*
@@ -2026,7 +2030,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * secure erase.
 		 */
 		mmc_wait_for_areq(card->host);
-		mmc_blk_issue_secdiscard_rq(mq, req);
+		mmc_blk_issue_secdiscard_rq(mq_rq);
 		break;
 	case REQ_OP_FLUSH:
 		/*
@@ -2034,11 +2038,11 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * flush.
 		 */
 		mmc_wait_for_areq(card->host);
-		mmc_blk_issue_flush(mq, req);
+		mmc_blk_issue_flush(mq_rq);
 		break;
 	default:
 		/* Normal request, just issue it */
-		mmc_blk_issue_rw_rq(mq, req);
+		mmc_blk_issue_rw_rq(mq_rq);
 		break;
 	}
 }
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 860ca7c8df86..bbc1c8029b3b 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -1,9 +1,8 @@
 #ifndef _MMC_CORE_BLOCK_H
 #define _MMC_CORE_BLOCK_H
 
-struct mmc_queue;
-struct request;
+struct mmc_queue_req;
 
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_issue_rq(struct mmc_queue_req *mq_rq);
 
 #endif
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index cf43a2d5410d..5511e323db31 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -62,7 +62,7 @@ static int mmc_queue_thread(void *d)
 				claimed_card = true;
 			}
 			set_current_state(TASK_RUNNING);
-			mmc_blk_issue_rq(mq, req);
+			mmc_blk_issue_rq(req_to_mmc_queue_req(req));
 			cond_resched();
 		} else {
 			mq->asleep = true;
-- 
2.13.6
