From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
To: vkoul@kernel.org, dmaengine@vger.kernel.org, linux-imx@nxp.com
Cc: linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
	robin.murphy@arm.com, Laurentiu Tudor <laurentiu.tudor@nxp.com>
Subject: [PATCH] dmaengine: fsl-edma: dma map slave device address
Date: Fri, 18 Jan 2019 12:06:23 +0200
Message-ID: <20190118100623.13271-1-laurentiu.tudor@nxp.com>

This mapping needs to be created for slave DMA transfers to work on
systems with an SMMU. The implementation mostly mimics the one in the
pl330 DMA driver, authored by Robin Murphy.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Suggested-by: Robin Murphy <robin.murphy@arm.com>
---
The original approach was to add the missing mappings in the I2C client
driver; see here for the discussion: https://patchwork.ozlabs.org/patch/1026013/
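For context, here is a minimal sketch of the client side that this change
covers (a hypothetical driver; the function and variable names are made up
for illustration). The slave device driver only hands the CPU physical
address of its FIFO to the dmaengine core via dma_slave_config; with this
patch the eDMA provider maps that address with dma_map_resource() when it
prepares a slave descriptor, so the TCD is programmed with an address that
is valid behind the SMMU.

  #include <linux/dmaengine.h>

  /*
   * Hypothetical client: configure the TX direction of a channel. The
   * provider (fsl-edma) later maps fifo_phys with dma_map_resource()
   * when a slave descriptor is prepared.
   */
  static int example_client_setup_tx(struct dma_chan *chan,
  				     phys_addr_t fifo_phys)
  {
  	struct dma_slave_config cfg = {
  		.direction	= DMA_MEM_TO_DEV,
  		.dst_addr	= fifo_phys,	/* CPU physical address of the device FIFO */
  		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
  		.dst_maxburst	= 4,
  	};

  	return dmaengine_slave_config(chan, &cfg);
  }

This is only a sketch; a real client (e.g. the I2C driver from the linked
discussion) takes the FIFO address from its own register resource.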

 drivers/dma/fsl-edma-common.c | 66 ++++++++++++++++++++++++++++++++---
 drivers/dma/fsl-edma-common.h |  4 +++
 drivers/dma/fsl-edma.c        |  1 +
 drivers/dma/mcf-edma.c        |  1 +
 4 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 8876c4c1bb2c..0e95ee24b6d4 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -6,6 +6,7 @@
 #include <linux/dmapool.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 
 #include "fsl-edma-common.h"
 
@@ -173,12 +174,62 @@ int fsl_edma_resume(struct dma_chan *chan)
 }
 EXPORT_SYMBOL_GPL(fsl_edma_resume);
 
+static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
+{
+	if (fsl_chan->dma_dir != DMA_NONE)
+		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
+				   fsl_chan->dma_dev_addr,
+				   fsl_chan->dma_dev_size,
+				   fsl_chan->dma_dir, 0);
+	fsl_chan->dma_dir = DMA_NONE;
+}
+
+static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
+				    enum dma_transfer_direction dir)
+{
+	struct device *dev = fsl_chan->vchan.chan.device->dev;
+	enum dma_data_direction dma_dir;
+	phys_addr_t addr = 0;
+	u32 size = 0;
+
+	switch (dir) {
+	case DMA_MEM_TO_DEV:
+		dma_dir = DMA_FROM_DEVICE;
+		addr = fsl_chan->cfg.dst_addr;
+		size = fsl_chan->cfg.dst_maxburst;
+		break;
+	case DMA_DEV_TO_MEM:
+		dma_dir = DMA_TO_DEVICE;
+		addr = fsl_chan->cfg.src_addr;
+		size = fsl_chan->cfg.src_maxburst;
+		break;
+	default:
+		dma_dir = DMA_NONE;
+		break;
+	}
+
+	/* Already mapped for this config? */
+	if (fsl_chan->dma_dir == dma_dir)
+		return true;
+
+	fsl_edma_unprep_slave_dma(fsl_chan);
+
+	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
+	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
+		return false;
+	fsl_chan->dma_dev_size = size;
+	fsl_chan->dma_dir = dma_dir;
+
+	return true;
+}
+
 int fsl_edma_slave_config(struct dma_chan *chan,
 				 struct dma_slave_config *cfg)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 
 	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+	fsl_edma_unprep_slave_dma(fsl_chan);
 
 	return 0;
 }
@@ -378,6 +429,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	if (!is_slave_direction(direction))
 		return NULL;
 
+	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+		return NULL;
+
 	sg_len = buf_len / period_len;
 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
 	if (!fsl_desc)
@@ -409,11 +463,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 
 		if (direction == DMA_MEM_TO_DEV) {
 			src_addr = dma_buf_next;
-			dst_addr = fsl_chan->cfg.dst_addr;
+			dst_addr = fsl_chan->dma_dev_addr;
 			soff = fsl_chan->cfg.dst_addr_width;
 			doff = 0;
 		} else {
-			src_addr = fsl_chan->cfg.src_addr;
+			src_addr = fsl_chan->dma_dev_addr;
 			dst_addr = dma_buf_next;
 			soff = 0;
 			doff = fsl_chan->cfg.src_addr_width;
@@ -444,6 +498,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	if (!is_slave_direction(direction))
 		return NULL;
 
+	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
+		return NULL;
+
 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
 	if (!fsl_desc)
 		return NULL;
@@ -468,11 +525,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 
 		if (direction == DMA_MEM_TO_DEV) {
 			src_addr = sg_dma_address(sg);
-			dst_addr = fsl_chan->cfg.dst_addr;
+			dst_addr = fsl_chan->dma_dev_addr;
 			soff = fsl_chan->cfg.dst_addr_width;
 			doff = 0;
 		} else {
-			src_addr = fsl_chan->cfg.src_addr;
+			src_addr = fsl_chan->dma_dev_addr;
 			dst_addr = sg_dma_address(sg);
 			soff = 0;
 			doff = fsl_chan->cfg.src_addr_width;
@@ -555,6 +612,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
 	fsl_edma_chan_mux(fsl_chan, 0, false);
 	fsl_chan->edesc = NULL;
 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	fsl_edma_unprep_slave_dma(fsl_chan);
 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
 
 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 8917e8865959..b435d8e1e3a1 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -6,6 +6,7 @@
 #ifndef _FSL_EDMA_COMMON_H_
 #define _FSL_EDMA_COMMON_H_
 
+#include <linux/dma-direction.h>
 #include "virt-dma.h"
 
 #define EDMA_CR_EDBG		BIT(1)
@@ -120,6 +121,9 @@ struct fsl_edma_chan {
 	struct dma_slave_config		cfg;
 	u32				attr;
 	struct dma_pool			*tcd_pool;
+	dma_addr_t			dma_dev_addr;
+	u32				dma_dev_size;
+	enum dma_data_direction		dma_dir;
 };
 
 struct fsl_edma_desc {
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 34d70112fcc9..75e8a7ba3a22 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -254,6 +254,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		fsl_chan->pm_state = RUNNING;
 		fsl_chan->slave_id = 0;
 		fsl_chan->idle = true;
+		fsl_chan->dma_dir = DMA_NONE;
 		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
 		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
 
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index 5de1b07eddff..7de54b2fafdb 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -214,6 +214,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
 		mcf_chan->edma = mcf_edma;
 		mcf_chan->slave_id = i;
 		mcf_chan->idle = true;
+		mcf_chan->dma_dir = DMA_NONE;
 		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
 		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
 		iowrite32(0x0, &regs->tcd[i].csr);
-- 
2.17.1


Thread overview: 4+ messages
2019-01-18 10:06 Laurentiu Tudor [this message]
2019-01-18 21:50 ` [PATCH] dmaengine: fsl-edma: dma map slave device address Angelo Dureghello
2019-01-21 14:13   ` Laurentiu Tudor
2019-02-04  7:03 ` Vinod Koul
