All of lore.kernel.org
 help / color / mirror / Atom feed
From: Adrian Hunter <adrian.hunter@intel.com>
To: Ulf Hansson <ulf.hansson@linaro.org>
Cc: linux-mmc <linux-mmc@vger.kernel.org>,
	Alex Lemberg <alex.lemberg@sandisk.com>,
	Mateusz Nowak <mateusz.nowak@intel.com>,
	Yuliy Izrailov <Yuliy.Izrailov@sandisk.com>,
	Jaehoon Chung <jh80.chung@samsung.com>,
	Dong Aisheng <dongas86@gmail.com>,
	Das Asutosh <asutoshd@codeaurora.org>,
	Zhangfei Gao <zhangfei.gao@gmail.com>,
	Sujit Reddy Thumma <sthumma@codeaurora.org>,
	Dorfman Konstantin <kdorfman@codeaurora.org>,
	David Griego <david.griego@linaro.org>,
	Sahitya Tummala <stummala@codeaurora.org>,
	Harjani Ritesh <riteshh@codeaurora.org>
Subject: [PATCH RFC 41/46] mmc: block: Introduce queue semantics
Date: Thu,  9 Jun 2016 14:52:41 +0300	[thread overview]
Message-ID: <1465473166-22532-42-git-send-email-adrian.hunter@intel.com> (raw)
In-Reply-To: <1465473166-22532-1-git-send-email-adrian.hunter@intel.com>

Change from viewing the requests in progress as 'current' and 'previous',
to viewing them as a queue. The current request is allocated to the first
free slot. The presence of incomplete requests is determined from the
count (mq->qcnt) of entries in the queue. Non-read-write requests (i.e.
discards and flushes) are not added to the queue at all and require no
special handling. Also no special handling is needed for the
MMC_BLK_NEW_REQUEST case.

As well as allowing an arbitrarily sized queue, the queue thread function
is significantly simpler.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
 drivers/mmc/card/block.c | 40 +++++++++++++------------
 drivers/mmc/card/queue.c | 76 ++++++++++++++++++++++++++++++------------------
 drivers/mmc/card/queue.h | 10 +++++--
 3 files changed, 77 insertions(+), 49 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1cb823a4dc76..215cfa359a3f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1989,14 +1989,23 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	struct mmc_blk_request *brq;
 	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
 	enum mmc_blk_status status;
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_cur = NULL;
 	struct mmc_queue_req *mq_rq;
 	struct request *req;
 	struct mmc_async_req *areq;
 	const u8 packed_nr = 2;
 	u8 reqs = 0;
 
-	if (!rqc && !mq->mqrq_prev->req)
+	if (rqc) {
+		mqrq_cur = mmc_queue_req_find(mq, rqc);
+		if (!mqrq_cur) {
+			WARN_ON(1);
+			mmc_blk_requeue(mq->queue, rqc);
+			rqc = NULL;
+		}
+	}
+
+	if (!mq->qcnt)
 		return 0;
 
 	if (mqrq_cur)
@@ -2027,11 +2036,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		} else
 			areq = NULL;
 		areq = mmc_start_req(card->host, areq, (int *) &status);
-		if (!areq) {
-			if (status == MMC_BLK_NEW_REQUEST)
-				mq->flags |= MMC_QUEUE_NEW_REQUEST;
+		if (!areq)
 			return 0;
-		}
 
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
@@ -2143,6 +2149,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		}
 	} while (ret);
 
+	mmc_queue_req_free(mq, mq_rq);
+
 	return 1;
 
  cmd_abort:
@@ -2161,6 +2169,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		if (mmc_card_removed(card)) {
 			rqc->cmd_flags |= REQ_QUIET;
 			blk_end_request_all(rqc, -EIO);
+			mmc_queue_req_free(mq, mqrq_cur);
 		} else {
 			/*
 			 * If current request is packed, it needs to put back.
@@ -2174,6 +2183,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		}
 	}
 
+	mmc_queue_req_free(mq, mq_rq);
+
 	return 0;
 }
 
@@ -2184,7 +2195,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = md->queue.card;
 	unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->qcnt)
 		/* claim host only for the first request */
 		mmc_get_card(card);
 
@@ -2197,10 +2208,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
 	if (cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		if (req->cmd_flags & REQ_SECURE)
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
@@ -2208,7 +2218,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			ret = mmc_blk_issue_discard_rq(mq, req);
 	} else if (cmd_flags & REQ_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
-		if (card->host->areq)
+		if (mq->qcnt)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
@@ -2216,14 +2226,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	}
 
 out:
-	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
-		/*
-		 * Release host when there are no more requests
-		 * and after special request(discard, flush) is done.
-		 * In case sepecial request, there is no reentry to
-		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
-		 */
+	/* Release host when there are no more requests */
+	if (!mq->qcnt)
 		mmc_put_card(card);
 	return ret;
 }
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4463d33094e4..4e134884a183 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -46,6 +46,35 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
+struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
+					 struct request *req)
+{
+	struct mmc_queue_req *mqrq;
+	int i = ffz(mq->qslots);
+
+	if (i >= mq->qdepth)
+		return NULL;
+
+	mqrq = &mq->mqrq[i];
+	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
+		test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = req;
+	mq->qcnt += 1;
+	__set_bit(mqrq->task_id, &mq->qslots);
+
+	return mqrq;
+}
+
+void mmc_queue_req_free(struct mmc_queue *mq,
+			struct mmc_queue_req *mqrq)
+{
+	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
+		!test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = NULL;
+	mq->qcnt -= 1;
+	__clear_bit(mqrq->task_id, &mq->qslots);
+}
+
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -56,8 +85,7 @@ static int mmc_queue_thread(void *d)
 
 	down(&mq->thread_sem);
 	do {
-		struct request *req = NULL;
-		unsigned int cmd_flags = 0;
+		struct request *req;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -70,37 +98,17 @@ static int mmc_queue_thread(void *d)
 			 * Dispatch queue is empty so set flags for
 			 * mmc_request_fn() to wake us up.
 			 */
-			if (mq->mqrq_prev->req)
+			if (mq->qcnt)
 				cntx->is_waiting_last_req = true;
 			else
 				mq->asleep = true;
 		}
-		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (req || mq->mqrq_prev->req) {
+		if (req || mq->qcnt) {
 			set_current_state(TASK_RUNNING);
-			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
 			cond_resched();
-			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
-				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-				continue; /* fetch again */
-			}
-
-			/*
-			 * Current request becomes previous request
-			 * and vice versa.
-			 * In case of special requests, current request
-			 * has been finished. Do not assign it to previous
-			 * request.
-			 */
-			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
-				mq->mqrq_cur->req = NULL;
-
-			mq->mqrq_prev->brq.mrq.data = NULL;
-			mq->mqrq_prev->req = NULL;
-			swap(mq->mqrq_prev, mq->mqrq_cur);
 		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
@@ -183,6 +191,21 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
 }
 
+static struct mmc_queue_req *mmc_queue_alloc_mqrqs(struct mmc_queue *mq,
+						   int qdepth)
+{
+	struct mmc_queue_req *mqrq;
+	int i;
+
+	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
+	if (mqrq) {
+		for (i = 0; i < qdepth; i++)
+			mqrq[i].task_id = i;
+	}
+
+	return mqrq;
+}
+
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 					unsigned int bouncesz)
 {
@@ -283,12 +306,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		return -ENOMEM;
 
 	mq->qdepth = 2;
-	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
-			   GFP_KERNEL);
+	mq->mqrq = mmc_queue_alloc_mqrqs(mq, mq->qdepth);
 	if (!mq->mqrq)
 		goto cleanup_queue;
-	mq->mqrq_cur = &mq->mqrq[0];
-	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index bb8dad281f72..1afd5dafd46d 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -42,6 +42,7 @@ struct mmc_queue_req {
 	struct mmc_async_req	mmc_active;
 	enum mmc_packed_type	cmd_type;
 	struct mmc_packed	*packed;
+	int			task_id;
 };
 
 struct mmc_queue {
@@ -50,16 +51,15 @@ struct mmc_queue {
 	struct semaphore	thread_sem;
 	unsigned int		flags;
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
-#define MMC_QUEUE_NEW_REQUEST	(1 << 1)
 	bool			asleep;
 
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
 	void			*data;
 	struct request_queue	*queue;
 	struct mmc_queue_req	*mqrq;
-	struct mmc_queue_req	*mqrq_cur;
-	struct mmc_queue_req	*mqrq_prev;
 	int			qdepth;
+	int			qcnt;
+	unsigned long		qslots;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -78,4 +78,8 @@ extern void mmc_packed_clean(struct mmc_queue *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
+extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
+						struct request *);
+extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
+
 #endif
-- 
1.9.1


  parent reply	other threads:[~2016-06-09 11:59 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-06-09 11:52 [PATCH RFC 00/46] mmc: mmc: Add Software Command Queuing Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 01/46] mmc: core: Add support for sending commands during data transfer Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 02/46] mmc: mmc_test: Add tests for sending commands during transfer Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 03/46] mmc: sdhci: Move busy signal handling into sdhci_finish_cmd() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 04/46] mmc: sdhci: Get rid of redundant BUG_ONs Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 05/46] mmc: sdhci: Simplify sdhci_finish_command() by clearing host->cmd at the start Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 06/46] mmc: sdhci: Record what command is using the data lines Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 07/46] mmc: sdhci: Get rid of host->busy_handle Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 08/46] mmc: sdhci: Reduce the use of host->mrq Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 09/46] mmc: sdhci: Move host->data warning Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 10/46] mmc: sdhci: Factor out sdhci_finish_mrq() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 11/46] mmc: sdhci: Factor out sdhci_needs_reset() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 12/46] mmc: sdhci: Track whether a reset is pending Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 13/46] mmc: sdhci: Clear pointers when a request finishes Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 14/46] mmc: sdhci: Ensure all requests get errored out Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 15/46] mmc: sdhci: Factor out sdhci_data_line_cmd() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 16/46] mmc: sdhci: Separate timer timeout for command and data requests Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 17/46] mmc: sdhci: Allow for finishing multiple requests Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 18/46] mmc: sdhci: Factor out sdhci_auto_cmd12() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 19/46] mmc: sdhci: Do not reset cmd or data circuits that are in use Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 20/46] mmc: sdhci: Support cap_cmd_during_tfr requests Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 21/46] mmc: sdhci-pci: Set MMC_CAP_CMD_DURING_TFR for Intel eMMC controllers Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 22/46] mmc: sdhci-acpi: " Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 23/46] mmc: queue: Fix queue thread wake-up Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 24/46] mmc: queue: Factor out mmc_queue_alloc_bounce_bufs() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 25/46] mmc: queue: Factor out mmc_queue_alloc_bounce_sgs() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 26/46] mmc: queue: Factor out mmc_queue_alloc_sgs() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 27/46] mmc: queue: Factor out mmc_queue_reqs_free_bufs() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 28/46] mmc: queue: Introduce queue depth Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 29/46] mmc: queue: Use queue depth to allocate and free Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 30/46] mmc: queue: Allocate queue of size qdepth Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 31/46] mmc: mmc: Add Command Queue definitions Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 32/46] mmc: mmc: Add functions to enable / disable the Command Queue Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 33/46] mmc: mmc_test: Disable Command Queue while mmc_test is used Adrian Hunter
2016-06-10 10:44   ` [PATCH RFC V2 " Adrian Hunter
2016-06-10 10:59     ` Venu Byravarasu
2016-06-10 11:36       ` Adrian Hunter
2016-06-10 12:03         ` Venu Byravarasu
2016-06-09 11:52 ` [PATCH RFC 34/46] mmc: block: Disable Command Queue while RPMB " Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 35/46] mmc: core: Do not prepare a new request twice Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 36/46] mmc: core: Export mmc_retune_hold() and mmc_retune_release() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 37/46] mmc: block: Factor out mmc_blk_requeue() Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 38/46] mmc: block: Fix 4K native sector check Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 39/46] mmc: block: Use local var for mqrq_cur Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 40/46] mmc: block: Pass mqrq to mmc_blk_prep_packed_list() Adrian Hunter
2016-06-09 11:52 ` Adrian Hunter [this message]
2016-06-09 11:52 ` [PATCH RFC 42/46] mmc: queue: Add a function to control wake-up on new requests Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 43/46] mmc: block: Add Software Command Queuing Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 44/46] mmc: mmc: Enable " Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 45/46] mmc: sdhci-pci: Enable Software Command Queuing for some Intel controllers Adrian Hunter
2016-06-09 11:52 ` [PATCH RFC 46/46] mmc: sdhci-acpi: " Adrian Hunter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1465473166-22532-42-git-send-email-adrian.hunter@intel.com \
    --to=adrian.hunter@intel.com \
    --cc=Yuliy.Izrailov@sandisk.com \
    --cc=alex.lemberg@sandisk.com \
    --cc=asutoshd@codeaurora.org \
    --cc=david.griego@linaro.org \
    --cc=dongas86@gmail.com \
    --cc=jh80.chung@samsung.com \
    --cc=kdorfman@codeaurora.org \
    --cc=linux-mmc@vger.kernel.org \
    --cc=mateusz.nowak@intel.com \
    --cc=riteshh@codeaurora.org \
    --cc=sthumma@codeaurora.org \
    --cc=stummala@codeaurora.org \
    --cc=ulf.hansson@linaro.org \
    --cc=zhangfei.gao@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.