From: Krishna Yarlagadda <kyarlagadda@nvidia.com>
To: <broonie@kernel.org>, <thierry.reding@gmail.com>,
	<jonathanh@nvidia.com>, <linux-spi@vger.kernel.org>,
	<linux-tegra@vger.kernel.org>
Cc: <skomatineni@nvidia.com>, <ldewangan@nvidia.com>,
	<linux-kernel@vger.kernel.org>,
	Krishna Yarlagadda <kyarlagadda@nvidia.com>
Subject: [PATCH 5/5] spi: tegra210-quad: native DMA support
Date: Sat, 1 Oct 2022 17:51:48 +0530
Message-ID: <20221001122148.9158-5-kyarlagadda@nvidia.com>
In-Reply-To: <20221001122148.9158-1-kyarlagadda@nvidia.com>

Enable native DMA support for Tegra234 & Tegra241.

The driver so far had no DMA support on these chips (has_dma was
false). Turn has_dma into a bitmask: Tegra210 and Tegra186 keep
requesting external "rx"/"tx" DMA channels (QSPI_DMA_EXT), while
Tegra234 and Tegra241 use the QSPI controller's internal DMA engine
(QSPI_DMA_INT) by programming the transfer buffer address into the
new QSPI_DMA_MEM_ADDRESS_REG / QSPI_DMA_HI_ADDRESS_REG registers.

Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
---
 drivers/spi/spi-tegra210-quad.c | 136 +++++++++++++++++++++++---------
 1 file changed, 97 insertions(+), 39 deletions(-)
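
Note for reviewers: on the internal DMA path the patch writes the
transfer buffer address straight into the controller; the 0xff mask on
the high word suggests the hardware takes a 40-bit address. Below is a
minimal stand-alone sketch of that split, assuming only the register
semantics implied by the new defines (qspi_program_dma_address() is
illustrative, not a driver function):

	#include <stdint.h>
	#include <stdio.h>

	#define QSPI_DMA_MEM_ADDRESS_REG	0x028	/* buffer address, bits [31:0] */
	#define QSPI_DMA_HI_ADDRESS_REG		0x02c	/* buffer address, bits [39:32] */

	/* Print the two register values the driver would program for a
	 * given 64-bit DMA buffer address. */
	static void qspi_program_dma_address(uint64_t dma_phys)
	{
		unsigned int lo = dma_phys & 0xffffffff;	/* full low word */
		unsigned int hi = (dma_phys >> 32) & 0xff;	/* upper 8 bits only */

		printf("reg 0x%03x <- 0x%08x\n", QSPI_DMA_MEM_ADDRESS_REG, lo);
		printf("reg 0x%03x <- 0x%08x\n", QSPI_DMA_HI_ADDRESS_REG, hi);
	}

	int main(void)
	{
		qspi_program_dma_address(0x00ab12345678ULL);	/* 40-bit example */
		return 0;
	}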

diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 99811509dafa..edecb999a614 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -111,6 +111,9 @@
 #define QSPI_DMA_BLK				0x024
 #define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
 
+#define QSPI_DMA_MEM_ADDRESS_REG		0x028
+#define QSPI_DMA_HI_ADDRESS_REG			0x02c
+
 #define QSPI_TX_FIFO				0x108
 #define QSPI_RX_FIFO				0x188
 
@@ -155,6 +158,9 @@
 #define DATA_DIR_TX				BIT(0)
 #define DATA_DIR_RX				BIT(1)
 
+#define QSPI_DMA_EXT				BIT(0)
+#define QSPI_DMA_INT				BIT(1)
+
 #define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
 #define DEFAULT_QSPI_DMA_BUF_LEN		(64 * 1024)
 #define CMD_TRANSFER				0
@@ -163,7 +169,7 @@
 #define DATA_TRANSFER				3
 
 struct tegra_qspi_soc_data {
-	bool has_dma;
+	int has_dma;
 	bool cmb_xfer_capable;
 	unsigned int cs_count;
 };
@@ -600,17 +606,22 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
 
 	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 
-	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
-	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+	if (t->tx_buf)
+		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+	if (t->rx_buf)
+		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
 }
 
 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 {
 	struct dma_slave_config dma_sconfig = { 0 };
+	dma_addr_t rx_dma_phys, tx_dma_phys;
 	unsigned int len;
 	u8 dma_burst;
 	int ret = 0;
 	u32 val;
+	bool has_ext_dma = (tqspi->soc_data->has_dma &
+			    QSPI_DMA_EXT) ? true : false;
 
 	if (tqspi->is_packed) {
 		ret = tegra_qspi_dma_map_xfer(tqspi, t);
@@ -629,23 +640,35 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
 		len = tqspi->curr_dma_words * 4;
 
 	/* set attention level based on length of transfer */
-	val = 0;
-	if (len & 0xf) {
-		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
-		dma_burst = 1;
-	} else if (((len) >> 4) & 0x1) {
-		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
-		dma_burst = 4;
-	} else {
-		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
-		dma_burst = 8;
+	val = 0;
+	if (has_ext_dma) {
+		if (len & 0xf) {
+			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+			dma_burst = 1;
+		} else if (((len) >> 4) & 0x1) {
+			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+			dma_burst = 4;
+		} else {
+			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+			dma_burst = 8;
+		}
 	}
 
 	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 	tqspi->dma_control_reg = val;
 
 	dma_sconfig.device_fc = true;
-	if (tqspi->cur_direction & DATA_DIR_TX) {
+	if ((tqspi->cur_direction & DATA_DIR_TX) && !has_ext_dma) {
+		if (tqspi->is_packed)
+			tx_dma_phys = t->tx_dma;
+		else
+			tx_dma_phys = tqspi->tx_dma_phys;
+		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+		tegra_qspi_writel(tqspi, (tx_dma_phys & 0xffffffff),
+				  QSPI_DMA_MEM_ADDRESS_REG);
+		tegra_qspi_writel(tqspi, ((tx_dma_phys >> 32) & 0xff),
+				  QSPI_DMA_HI_ADDRESS_REG);
+	} else if ((tqspi->cur_direction & DATA_DIR_TX) && has_ext_dma) {
 		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
 		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dma_sconfig.dst_maxburst = dma_burst;
@@ -663,7 +686,16 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
 		}
 	}
 
-	if (tqspi->cur_direction & DATA_DIR_RX) {
+	if ((tqspi->cur_direction & DATA_DIR_RX) && !has_ext_dma) {
+		if (tqspi->is_packed)
+			rx_dma_phys = t->rx_dma;
+		else
+			rx_dma_phys = tqspi->rx_dma_phys;
+		tegra_qspi_writel(tqspi, (rx_dma_phys & 0xffffffff),
+				  QSPI_DMA_MEM_ADDRESS_REG);
+		tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
+				  QSPI_DMA_HI_ADDRESS_REG);
+	} else if ((tqspi->cur_direction & DATA_DIR_RX) && has_ext_dma) {
 		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
 		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		dma_sconfig.src_maxburst = dma_burst;
@@ -751,13 +783,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	u32 *dma_buf;
 	int err;
 
-	dma_chan = dma_request_chan(tqspi->dev, "rx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
+	if (!tqspi->soc_data->has_dma)
+		return -ENODEV;
+
+	if (tqspi->soc_data->has_dma & QSPI_DMA_EXT) {
+		dma_chan = dma_request_chan(tqspi->dev, "rx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
 
-	tqspi->rx_dma_chan = dma_chan;
+		tqspi->rx_dma_chan = dma_chan;
+
+		dma_chan = dma_request_chan(tqspi->dev, "tx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
+
+		tqspi->tx_dma_chan = dma_chan;
+	} else {
+		tqspi->rx_dma_chan = NULL;
+		tqspi->tx_dma_chan = NULL;
+	}
 
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
@@ -768,14 +816,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	tqspi->rx_dma_buf = dma_buf;
 	tqspi->rx_dma_phys = dma_phys;
 
-	dma_chan = dma_request_chan(tqspi->dev, "tx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
-
-	tqspi->tx_dma_chan = dma_chan;
-
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
 		err = -ENOMEM;
@@ -1045,6 +1085,8 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 					struct spi_message *msg)
 {
 	bool is_first_msg = true;
+	bool has_ext_dma = (tqspi->soc_data->has_dma &
+			    QSPI_DMA_EXT) ? true : false;
 	struct spi_transfer *xfer;
 	struct spi_device *spi = msg->spi;
 	u8 transfer_phase = 0;
@@ -1109,12 +1151,12 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 			if (WARN_ON(ret == 0)) {
 				dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
 					ret);
-				if (tqspi->is_curr_dma_xfer &&
+				if (tqspi->is_curr_dma_xfer && has_ext_dma &&
 				    (tqspi->cur_direction & DATA_DIR_TX))
 					dmaengine_terminate_all
 						(tqspi->tx_dma_chan);
 
-				if (tqspi->is_curr_dma_xfer &&
+				if (tqspi->is_curr_dma_xfer && has_ext_dma &&
 				    (tqspi->cur_direction & DATA_DIR_RX))
 					dmaengine_terminate_all
 						(tqspi->rx_dma_chan);
@@ -1178,6 +1220,8 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 	struct spi_device *spi = msg->spi;
 	struct spi_transfer *transfer;
 	bool is_first_msg = true;
+	bool has_ext_dma = (tqspi->soc_data->has_dma &
+			    QSPI_DMA_EXT) ? true : false;
 	int ret = 0, val = 0;
 
 	msg->status = 0;
@@ -1230,9 +1274,11 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 						  QSPI_DMA_TIMEOUT);
 		if (WARN_ON(ret == 0)) {
 			dev_err(tqspi->dev, "transfer timeout\n");
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
+			if (tqspi->is_curr_dma_xfer && has_ext_dma &&
+			    (tqspi->cur_direction & DATA_DIR_TX))
 				dmaengine_terminate_all(tqspi->tx_dma_chan);
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
+			if (tqspi->is_curr_dma_xfer && has_ext_dma &&
+			    (tqspi->cur_direction & DATA_DIR_RX))
 				dmaengine_terminate_all(tqspi->rx_dma_chan);
 			tegra_qspi_handle_error(tqspi);
 			ret = -EIO;
@@ -1365,8 +1411,20 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	unsigned long flags;
 	long wait_status;
 	int err = 0;
+	bool has_ext_dma = (tqspi->soc_data->has_dma &
+			    QSPI_DMA_EXT) ? true : false;
+
+	if ((tqspi->cur_direction & DATA_DIR_TX) && !has_ext_dma) {
+		if (tqspi->tx_status)
+			err += 1;
+	}
+
+	if ((tqspi->cur_direction & DATA_DIR_RX) && !has_ext_dma) {
+		if (tqspi->rx_status)
+			err += 2;
+	}
 
-	if (tqspi->cur_direction & DATA_DIR_TX) {
+	if ((tqspi->cur_direction & DATA_DIR_TX) && has_ext_dma) {
 		if (tqspi->tx_status) {
 			dmaengine_terminate_all(tqspi->tx_dma_chan);
 			err += 1;
@@ -1381,7 +1439,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 		}
 	}
 
-	if (tqspi->cur_direction & DATA_DIR_RX) {
+	if ((tqspi->cur_direction & DATA_DIR_RX) && has_ext_dma) {
 		if (tqspi->rx_status) {
 			dmaengine_terminate_all(tqspi->rx_dma_chan);
 			err += 2;
@@ -1454,25 +1512,25 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 }
 
 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
-	.has_dma = true,
+	.has_dma = QSPI_DMA_EXT,
 	.cmb_xfer_capable = false,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
-	.has_dma = true,
+	.has_dma = QSPI_DMA_EXT,
 	.cmb_xfer_capable = true,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
-	.has_dma = false,
+	.has_dma = QSPI_DMA_INT,
 	.cmb_xfer_capable = true,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
-	.has_dma = false,
+	.has_dma = QSPI_DMA_INT,
 	.cmb_xfer_capable = true,
 	.cs_count = 4,
 };
-- 
2.17.1

