All of lore.kernel.org
 help / color / mirror / Atom feed
From: Linus Walleij <linus.walleij@linaro.org>
To: linux-mmc@vger.kernel.org, Ulf Hansson <ulf.hansson@linaro.org>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Paolo Valente <paolo.valente@linaro.org>
Cc: Chunyan Zhang <zhang.chunyan@linaro.org>,
	Baolin Wang <baolin.wang@linaro.org>,
	linux-block@vger.kernel.org, Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@lst.de>, Arnd Bergmann <arnd@arndb.de>,
	Linus Walleij <linus.walleij@linaro.org>
Subject: [PATCH 13/16] mmc: queue: issue struct mmc_queue_req items
Date: Thu,  9 Feb 2017 16:34:00 +0100	[thread overview]
Message-ID: <20170209153403.9730-14-linus.walleij@linaro.org> (raw)
In-Reply-To: <20170209153403.9730-1-linus.walleij@linaro.org>

Instead of passing two pointers around and messing and reassigning
to the left and right, issue mmc_queue_req and dereference
the queue from the request where needed. The struct mmc_queue_req
is the thing that has a lifecycle after all: this is what we are
keeping in our queue. Augment all users to be passed the struct
mmc_queue_req as well.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 drivers/mmc/core/block.c | 88 ++++++++++++++++++++++++------------------------
 drivers/mmc/core/block.h |  5 ++-
 drivers/mmc/core/queue.c |  6 ++--
 3 files changed, 50 insertions(+), 49 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4952a105780e..628a22b9bf41 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1151,9 +1151,9 @@ int mmc_access_rpmb(struct mmc_queue *mq)
 	return false;
 }
 
-static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_discard_rq(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_DISCARD;
@@ -1163,8 +1163,8 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 		goto fail;
 	}
 
-	from = blk_rq_pos(req);
-	nr = blk_rq_sectors(req);
+	from = blk_rq_pos(mq_rq->req);
+	nr = blk_rq_sectors(mq_rq->req);
 
 	if (mmc_can_discard(card))
 		arg = MMC_DISCARD_ARG;
@@ -1188,13 +1188,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	if (!err)
 		mmc_blk_reset_success(md, type);
 fail:
-	blk_end_request(req, err, blk_rq_bytes(req));
+	blk_end_request(mq_rq->req, err, blk_rq_bytes(mq_rq->req));
 }
 
-static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
-				       struct request *req)
+static void mmc_blk_issue_secdiscard_rq(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_SECDISCARD;
@@ -1204,8 +1203,8 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		goto out;
 	}
 
-	from = blk_rq_pos(req);
-	nr = blk_rq_sectors(req);
+	from = blk_rq_pos(mq_rq->req);
+	nr = blk_rq_sectors(mq_rq->req);
 
 	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
 		arg = MMC_SECURE_TRIM1_ARG;
@@ -1253,12 +1252,12 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	if (!err)
 		mmc_blk_reset_success(md, type);
 out:
-	blk_end_request(req, err, blk_rq_bytes(req));
+	blk_end_request(mq_rq->req, err, blk_rq_bytes(mq_rq->req));
 }
 
-static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_issue_flush(struct mmc_queue_req *mq_rq)
 {
-	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	int ret = 0;
 
@@ -1266,7 +1265,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
 	if (ret)
 		ret = -EIO;
 
-	blk_end_request_all(req, ret);
+	blk_end_request_all(mq_rq->req, ret);
 }
 
 /*
@@ -1614,11 +1613,13 @@ static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
  * @mq: the queue with the card and host to restart
  * @req: a new request that want to be started after the current one
  */
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq)
+static void mmc_blk_rw_try_restart(struct mmc_queue_req *mq_rq)
 {
+	struct mmc_queue *mq = mq_rq->mq;
+
 	/* Proceed and try to restart the current async request */
-	mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
-	mmc_restart_areq(mq->card->host, &mq->mqrq_cur->areq);
+	mmc_blk_rw_rq_prep(mq_rq, mq->card, 0, mq);
+	mmc_restart_areq(mq->card->host, &mq_rq->areq);
 }
 
 void mmc_blk_rw_done(struct mmc_async_req *areq,
@@ -1676,11 +1677,11 @@ void mmc_blk_rw_done(struct mmc_async_req *areq,
 		req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
 		if (mmc_blk_reset(md, host, type)) {
 			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		if (!req_pending) {
-			mmc_blk_rw_try_restart(mq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		break;
@@ -1693,7 +1694,7 @@ void mmc_blk_rw_done(struct mmc_async_req *areq,
 		if (!mmc_blk_reset(md, host, type))
 			break;
 		mmc_blk_rw_cmd_abort(card, old_req);
-		mmc_blk_rw_try_restart(mq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	case MMC_BLK_DATA_ERR: {
 		int err;
@@ -1702,7 +1703,7 @@ void mmc_blk_rw_done(struct mmc_async_req *areq,
 			break;
 		if (err == -ENODEV) {
 			mmc_blk_rw_cmd_abort(card, old_req);
-			mmc_blk_rw_try_restart(mq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		/* Fall through */
@@ -1723,19 +1724,19 @@ void mmc_blk_rw_done(struct mmc_async_req *areq,
 		req_pending = blk_end_request(old_req, -EIO,
 					      brq->data.blksz);
 		if (!req_pending) {
-			mmc_blk_rw_try_restart(mq);
+			mmc_blk_rw_try_restart(mq_rq);
 			return;
 		}
 		break;
 	case MMC_BLK_NOMEDIUM:
 		mmc_blk_rw_cmd_abort(card, old_req);
-		mmc_blk_rw_try_restart(mq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	default:
 		pr_err("%s: Unhandled return value (%d)",
 		       old_req->rq_disk->disk_name, status);
 		mmc_blk_rw_cmd_abort(card, old_req);
-		mmc_blk_rw_try_restart(mq);
+		mmc_blk_rw_try_restart(mq_rq);
 		return;
 	}
 
@@ -1747,15 +1748,16 @@ void mmc_blk_rw_done(struct mmc_async_req *areq,
 		mmc_blk_rw_rq_prep(mq_rq, card,
 				   disable_multi, mq);
 		mq_rq->brq.retune_retry_done = retune_retry_done;
-		mmc_restart_areq(host, &mq->mqrq_cur->areq);
+		mmc_restart_areq(host, &mq_rq->areq);
 	}
 }
 
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static void mmc_blk_issue_rw_rq(struct mmc_queue_req *mq_rq)
 {
+	struct mmc_queue *mq = mq_rq->mq;
 	struct mmc_card *card = mq->card;
 
-	if (!new_req) {
+	if (!mq_rq->req) {
 		pr_err("%s: NULL request!\n", __func__);
 		return;
 	}
@@ -1765,54 +1767,52 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	 * multiple read or write is allowed
 	 */
 	if (mmc_large_sector(card) &&
-	    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
+	    !IS_ALIGNED(blk_rq_sectors(mq_rq->req), 8)) {
 		pr_err("%s: Transfer size is not 4KB sector size aligned\n",
-		       new_req->rq_disk->disk_name);
-		mmc_blk_rw_cmd_abort(card, new_req);
+		       mq_rq->req->rq_disk->disk_name);
+		mmc_blk_rw_cmd_abort(card, mq_rq->req);
 		return;
 	}
 
-	mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-	mmc_start_areq(card->host, &mq->mqrq_cur->areq);
+	mmc_blk_rw_rq_prep(mq_rq, card, 0, mq);
+	mmc_start_areq(card->host, &mq_rq->areq);
 }
 
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+void mmc_blk_issue_rq(struct mmc_queue_req *mq_rq)
 {
 	int ret;
-	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_blk_data *md = mq_rq->mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
-		if (req) {
-			blk_end_request_all(req, -EIO);
-		}
+		blk_end_request_all(mq_rq->req, -EIO);
 		return;
 	}
 
-	if (req && req_op(req) == REQ_OP_DISCARD) {
+	if (req_op(mq_rq->req) == REQ_OP_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq) {
 			wait_for_completion(&card->host->areq->complete);
 			card->host->areq = NULL;
 		}
-		mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
+		mmc_blk_issue_discard_rq(mq_rq);
+	} else if (req_op(mq_rq->req) == REQ_OP_SECURE_ERASE) {
 		/* complete ongoing async transfer before issuing secure erase*/
 		if (card->host->areq) {
 			wait_for_completion(&card->host->areq->complete);
 			card->host->areq = NULL;
 		}
-		mmc_blk_issue_secdiscard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
+		mmc_blk_issue_secdiscard_rq(mq_rq);
+	} else if (req_op(mq_rq->req) == REQ_OP_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
 		if (card->host->areq) {
 			wait_for_completion(&card->host->areq->complete);
 			card->host->areq = NULL;
 		}
-		mmc_blk_issue_flush(mq, req);
+		mmc_blk_issue_flush(mq_rq);
 	} else {
-		mmc_blk_issue_rw_rq(mq, req);
+		mmc_blk_issue_rw_rq(mq_rq);
 	}
 }
 
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index b4b489911599..0326fa5d8217 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -3,10 +3,9 @@
 
 struct mmc_async_req;
 enum mmc_blk_status;
-struct mmc_queue;
-struct request;
+struct mmc_queue_req;
 
 void mmc_blk_rw_done(struct mmc_async_req *areq, enum mmc_blk_status status);
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_issue_rq(struct mmc_queue_req *mq_rq);
 
 #endif
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index c9f28de7b0f4..c4e1ced55796 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -54,6 +54,7 @@ static int mmc_queue_thread(void *d)
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
 	bool claimed_host = false;
+	struct mmc_queue_req *mq_rq;
 
 	current->flags |= PF_MEMALLOC;
 
@@ -65,7 +66,8 @@ static int mmc_queue_thread(void *d)
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
 		mq->asleep = false;
-		mq->mqrq_cur->req = req;
+		mq_rq = mq->mqrq_cur;
+		mq_rq->req = req;
 		spin_unlock_irq(q->queue_lock);
 
 		if (req) {
@@ -74,7 +76,7 @@ static int mmc_queue_thread(void *d)
 			if (!claimed_host)
 				mmc_get_card(mq->card);
 			set_current_state(TASK_RUNNING);
-			mmc_blk_issue_rq(mq, req);
+			mmc_blk_issue_rq(mq_rq);
 			cond_resched();
 			/*
 			 * Current request becomes previous request
-- 
2.9.3

  parent reply	other threads:[~2017-02-09 15:34 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-02-09 15:33 [PATCH 00/16] multiqueue for MMC/SD third try Linus Walleij
2017-02-09 15:33 ` [PATCH 01/16] mmc: core: move some code in mmc_start_areq() Linus Walleij
     [not found]   ` <CGME20170228145506epcas1p1dd72cc5738c3f36df97ac06603ad2731@epcas1p1.samsung.com>
2017-02-28 14:55     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 02/16] mmc: core: refactor asynchronous request finalization Linus Walleij
     [not found]   ` <CGME20170228145552epcas5p4a43c23971d58b30ad6ab9d2c612abe9a@epcas5p4.samsung.com>
2017-02-28 14:55     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 03/16] mmc: core: refactor mmc_request_done() Linus Walleij
     [not found]   ` <CGME20170228145627epcas1p18fb6390b7ae14a6961fac9703712e0a0@epcas1p1.samsung.com>
2017-02-28 14:56     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 04/16] mmc: core: move the asynchronous post-processing Linus Walleij
2017-02-09 15:33 ` [PATCH 05/16] mmc: core: add a kthread for completing requests Linus Walleij
     [not found]   ` <CGME20170228145719epcas5p33d013fd48483bfba477b3f607dcdccb4@epcas5p3.samsung.com>
2017-02-28 14:57     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 06/16] mmc: core: replace waitqueue with worker Linus Walleij
2017-02-22 13:29   ` Adrian Hunter
2017-03-09 22:49     ` Linus Walleij
2017-03-10 14:21       ` Adrian Hunter
2017-03-10 22:05         ` Jens Axboe
2017-03-13  9:25           ` Adrian Hunter
2017-03-13 14:19             ` Jens Axboe
2017-03-14 12:59               ` Adrian Hunter
2017-03-14 14:36                 ` Jens Axboe
2017-03-14 14:43                   ` Christoph Hellwig
2017-03-14 14:52                     ` Jens Axboe
2017-03-28  7:47                   ` Linus Walleij
2017-03-28  7:46         ` Linus Walleij
     [not found]   ` <CGME20170228161023epcas5p3916c2e171d57b8c7814be7841fbab3aa@epcas5p3.samsung.com>
2017-02-28 16:10     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 07/16] mmc: core: do away with is_done_rcv Linus Walleij
     [not found]   ` <CGME20170228161047epcas1p2f307733cb1c441d0c290e794a04a06a8@epcas1p2.samsung.com>
2017-02-28 16:10     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 08/16] mmc: core: do away with is_new_req Linus Walleij
     [not found]   ` <CGME20170228161102epcas5p25dc3b560013599fda6cc750f6d528595@epcas5p2.samsung.com>
2017-02-28 16:11     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 09/16] mmc: core: kill off the context info Linus Walleij
     [not found]   ` <CGME20170228161117epcas5p20a6e62146733466b98c0ef4ea6efbb5f@epcas5p2.samsung.com>
2017-02-28 16:11     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 10/16] mmc: queue: simplify queue logic Linus Walleij
     [not found]   ` <CGME20170228161132epcas5p265793e8675aa2f1e5dd199a9ee0ab6f1@epcas5p2.samsung.com>
2017-02-28 16:11     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 11/16] mmc: block: shuffle retry and error handling Linus Walleij
     [not found]   ` <CGME20170228174522epcas5p34dce6477eb96f7e0fb38431c4de35f60@epcas5p3.samsung.com>
2017-02-28 17:45     ` Bartlomiej Zolnierkiewicz
     [not found]       ` <CGME20170301114559epcas5p1a0c32fbc3a5573a6f1c6291792ea1b2e@epcas5p1.samsung.com>
2017-03-01 11:45         ` Bartlomiej Zolnierkiewicz
     [not found]           ` <CGME20170301155243epcas1p1140ce11db60b31065a0356525a2ee0a0@epcas1p1.samsung.com>
2017-03-01 15:52             ` Bartlomiej Zolnierkiewicz
     [not found]               ` <CGME20170301155822epcas5p103373c6afbd516e4792ebef9bb202b94@epcas5p1.samsung.com>
2017-03-01 15:58                 ` Bartlomiej Zolnierkiewicz
     [not found]               ` <CGME20170301174856epcas5p16bdf861a0117a33f9dad37a81449a95e@epcas5p1.samsung.com>
2017-03-01 17:48                 ` Bartlomiej Zolnierkiewicz
2017-02-09 15:33 ` [PATCH 12/16] mmc: queue: stop flushing the pipeline with NULL Linus Walleij
     [not found]   ` <CGME20170228180309epcas5p317af83f41d3b0426868dcfd660bd0aec@epcas5p3.samsung.com>
2017-02-28 18:03     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:34 ` Linus Walleij [this message]
     [not found]   ` <CGME20170228181009epcas1p4ca0e714214097d07d7172182ba8e032b@epcas1p4.samsung.com>
2017-02-28 18:10     ` [PATCH 13/16] mmc: queue: issue struct mmc_queue_req items Bartlomiej Zolnierkiewicz
2017-02-09 15:34 ` [PATCH 14/16] mmc: queue: get/put struct mmc_queue_req Linus Walleij
     [not found]   ` <CGME20170228182149epcas1p28789bce5433cee1579e8b8d083ba5811@epcas1p2.samsung.com>
2017-02-28 18:21     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:34 ` [PATCH 15/16] mmc: queue: issue requests in massive parallel Linus Walleij
     [not found]   ` <CGME20170301120247epcas1p1ad2be24dc9bbd1218dab8f565fb82b27@epcas1p1.samsung.com>
2017-03-01 12:02     ` Bartlomiej Zolnierkiewicz
2017-02-09 15:34 ` [PATCH 16/16] RFC: mmc: switch MMC/SD to use blk-mq multiqueueing v3 Linus Walleij
2017-02-09 15:39 ` [PATCH 00/16] multiqueue for MMC/SD third try Christoph Hellwig
2017-02-11 13:03 ` Avri Altman
2017-02-11 13:03   ` Avri Altman
2017-02-12 16:16   ` Linus Walleij

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170209153403.9730-14-linus.walleij@linaro.org \
    --to=linus.walleij@linaro.org \
    --cc=adrian.hunter@intel.com \
    --cc=arnd@arndb.de \
    --cc=axboe@kernel.dk \
    --cc=baolin.wang@linaro.org \
    --cc=hch@lst.de \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-mmc@vger.kernel.org \
    --cc=paolo.valente@linaro.org \
    --cc=ulf.hansson@linaro.org \
    --cc=zhang.chunyan@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.