From: Abhishek Sahu <absahu@codeaurora.org>
To: Boris Brezillon <boris.brezillon@bootlin.com>,
	Miquel Raynal <miquel.raynal@bootlin.com>
Cc: David Woodhouse <dwmw2@infradead.org>,
	Brian Norris <computersforpeace@gmail.com>,
	Marek Vasut <marek.vasut@gmail.com>,
	Richard Weinberger <richard@nod.at>,
	linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mtd@lists.infradead.org, Andy Gross <andy.gross@linaro.org>,
	Archit Taneja <architt@codeaurora.org>,
	Abhishek Sahu <absahu@codeaurora.org>,
	stable@vger.kernel.org
Subject: [PATCH v4 06/15] mtd: rawnand: qcom: wait for desc completion in all BAM channels
Date: Wed, 20 Jun 2018 12:57:33 +0530
Message-ID: <1529479662-4026-7-git-send-email-absahu@codeaurora.org>
In-Reply-To: <1529479662-4026-1-git-send-email-absahu@codeaurora.org>

The BAM has 3 channels - tx, rx and command. The command channel
is used for register reads/writes, the tx channel for data writes
and the rx channel for data reads. Currently, the driver treats the
transfer as complete once all the command descriptors have
completed. Sometimes there is a race between data channel (tx/rx)
completion and command channel completion. In these cases, the data
in the buffer is not valid during the small window between command
descriptor completion and data descriptor completion.
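
For illustration, the pre-patch wait in submit_descs() (condensed
from the diff below) only tracks the command channel, so the tx/rx
data descriptors may still be in flight when it returns:

	/* pre-patch flow (condensed): wait only on the command channel */
	dma_async_issue_pending(nandc->tx_chan);
	dma_async_issue_pending(nandc->rx_chan);

	/*
	 * Returns once the command descriptors finish; the data (tx/rx)
	 * descriptors may still be running in this window.
	 */
	if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
		return -ETIMEDOUT;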

This patch signals NAND transfer completion only when both the
data and command DMA channels have completed all of their DMA
descriptors. It assigns a completion callback to the last DMA
descriptor of each channel and waits until both have completed.
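
Conceptually, the new completion path looks like this (a condensed
sketch of the logic added in the diff below, not the full code):

	/* sketch: one callback per channel, funnelled into one completion */
	static void qpic_bam_dma_done(void *data)
	{
		struct bam_transaction *bam_txn = data;

		/*
		 * When data descriptors are queued, two callbacks arrive
		 * (command + data); only the second one wakes the waiter.
		 */
		if (bam_txn->wait_second_completion)
			bam_txn->wait_second_completion = false;
		else
			complete(&bam_txn->txn_done);
	}

	/* in submit_descs(): wait for that completion with a timeout */
	if (!wait_for_completion_timeout(&bam_txn->txn_done,
					 QPIC_NAND_COMPLETION_TIMEOUT))
		return -ETIMEDOUT;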

Fixes: 8d6b6d7e135e ("mtd: nand: qcom: support for command descriptor formation")
Cc: stable@vger.kernel.org
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
---
* Changes from v3:
  1. NONE

* Changes from v2:
  1. Changed commit message and comments slightly
  2. Renamed first_chan_done to wait_second_completion and set it
     before submitting the descriptors
  3. Marked for stable tree

* Changes from v1:
  NONE

 drivers/mtd/nand/raw/qcom_nandc.c | 53 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 2375780..fc20149 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -213,6 +213,8 @@
 #define QPIC_PER_CW_CMD_SGL		32
 #define QPIC_PER_CW_DATA_SGL		8
 
+#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
+
 /*
  * Flags used in DMA descriptor preparation helper functions
  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
@@ -245,6 +247,11 @@
  * @tx_sgl_start - start index in data sgl for tx.
  * @rx_sgl_pos - current index in data sgl for rx.
  * @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for second DMA desc completion before making
+ *			     the NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
  */
 struct bam_transaction {
 	struct bam_cmd_element *bam_ce;
@@ -258,6 +265,10 @@ struct bam_transaction {
 	u32 tx_sgl_start;
 	u32 rx_sgl_pos;
 	u32 rx_sgl_start;
+	bool wait_second_completion;
+	struct completion txn_done;
+	struct dma_async_tx_descriptor *last_data_desc;
+	struct dma_async_tx_descriptor *last_cmd_desc;
 };
 
 /*
@@ -504,6 +515,8 @@ static void free_bam_transaction(struct qcom_nand_controller *nandc)
 
 	bam_txn->data_sgl = bam_txn_buf;
 
+	init_completion(&bam_txn->txn_done);
+
 	return bam_txn;
 }
 
@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
 	bam_txn->tx_sgl_start = 0;
 	bam_txn->rx_sgl_pos = 0;
 	bam_txn->rx_sgl_start = 0;
+	bam_txn->last_data_desc = NULL;
+	bam_txn->wait_second_completion = false;
 
 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_CMD_SGL);
 	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_DATA_SGL);
+
+	reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+	struct bam_transaction *bam_txn = data;
+
+	/*
+	 * In case of data transfer with NAND, 2 callbacks will be generated.
+	 * One for command channel and another one for data channel.
+	 * If current transaction has data descriptors
+	 * (i.e. wait_second_completion is true), then set this to false
+	 * and wait for second DMA descriptor completion.
+	 */
+	if (bam_txn->wait_second_completion)
+		bam_txn->wait_second_completion = false;
+	else
+		complete(&bam_txn->txn_done);
 }
 
 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
 
 	desc->dma_desc = dma_desc;
 
+	/* update last data/command descriptor */
+	if (chan == nandc->cmd_chan)
+		bam_txn->last_cmd_desc = dma_desc;
+	else
+		bam_txn->last_data_desc = dma_desc;
+
 	list_add_tail(&desc->node, &nandc->desc_list);
 
 	return 0;
@@ -1273,10 +1314,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 		cookie = dmaengine_submit(desc->dma_desc);
 
 	if (nandc->props->is_bam) {
+		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+		bam_txn->last_cmd_desc->callback_param = bam_txn;
+		if (bam_txn->last_data_desc) {
+			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+			bam_txn->last_data_desc->callback_param = bam_txn;
+			bam_txn->wait_second_completion = true;
+		}
+
 		dma_async_issue_pending(nandc->tx_chan);
 		dma_async_issue_pending(nandc->rx_chan);
+		dma_async_issue_pending(nandc->cmd_chan);
 
-		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+		if (!wait_for_completion_timeout(&bam_txn->txn_done,
+						 QPIC_NAND_COMPLETION_TIMEOUT))
 			return -ETIMEDOUT;
 	} else {
 		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc.
is a member of Code Aurora Forum, hosted by The Linux Foundation


Thread overview: 21+ messages
2018-06-20  7:27 [PATCH v4 00/15] Update for QCOM NAND driver Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 01/15] mtd: rawnand: helper function for setting up ECC configuration Abhishek Sahu
2018-06-25  2:23   ` Masahiro Yamada
2018-06-20  7:27 ` [PATCH v4 02/15] mtd: rawnand: denali: use helper function for ecc setup Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 03/15] dt-bindings: qcom_nandc: update for ECC strength and step size Abhishek Sahu
2018-06-20 16:01   ` Rob Herring
2018-06-20  7:27 ` [PATCH v4 04/15] mtd: rawnand: qcom: remove dt property nand-ecc-step-size Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 05/15] mtd: rawnand: qcom: use the ecc strength from device parameter Abhishek Sahu
2018-06-20  7:27 ` Abhishek Sahu [this message]
2018-06-20  7:27 ` [PATCH v4 07/15] mtd: rawnand: qcom: erased page detection for uncorrectable errors only Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 08/15] mtd: rawnand: qcom: fix null pointer access for erased page detection Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 09/15] mtd: rawnand: qcom: parse read errors for read oob also Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 10/15] mtd: rawnand: qcom: modify write_oob to remove read codeword part Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 11/15] mtd: rawnand: qcom: fix return value for raw page read Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 12/15] mtd: rawnand: qcom: check for operation errors in case of raw read Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 13/15] mtd: rawnand: qcom: code reorganization for " Abhishek Sahu
2018-06-20  7:27 ` [PATCH v4 14/15] mtd: rawnand: qcom: erased page bitflips detection Abhishek Sahu
2018-06-26 18:02   ` Miquel Raynal
2018-06-20  7:27 ` [PATCH v4 15/15] mtd: rawnand: provide only single helper function for ECC conf Abhishek Sahu
2018-07-01 18:09 ` [PATCH v4 00/15] Update for QCOM NAND driver Miquel Raynal
2018-07-03  3:30   ` Abhishek Sahu
