* [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments
@ 2013-08-07 10:19 Daniel Mack
  2013-08-07 10:19 ` [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels Daniel Mack
                   ` (11 more replies)
  0 siblings, 12 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

I've been working on the PXA DMA transition for some days now, and
tweaked the mmp-pdma driver in order to make it work in pure DT
environments.

I also ported over most PXA peripheral drivers, but that will take some
more time to settle. I'll post those in a separate series. Up front,
here are some patches for the mmp-pdma that can be reviewed and merged
independently.

With these patches applied, I can successfully use pxa3xx-nand, pxamci
and audio on a custom pxa3xx board.

I'm including two patches from Xiang Wang which have been posted
but not yet merged, as far as I can tell.


Many thanks,
Daniel


Daniel Mack (10):
  dma: mmp_pdma: factor out DRCMR register calculation
  dma: mmp_pdma: fix maximum transfer length
  dma: mmp_pdma: add filter function
  dma: mmp_pdma: make the controller a DMA provider
  dma: mmp_pdma: print the number of channels at probe time
  dma: mmp_pdma: remove duplicate assignment
  dma: mmp_pdma: add support for byte-aligned transfers
  dma: mmp_pdma: implement DMA_PAUSE and DMA_RESUME
  dma: mmp_pdma: add support for residue reporting
  dma: mmp_pdma: add support for cyclic DMA descriptors

Xiang Wang (2):
  dma: mmp_pdma: add protect when alloc/free phy channels
  dma: mmp_pdma: clear DRCMR when free a phy channel

 drivers/dma/mmp_pdma.c       | 274 +++++++++++++++++++++++++++++++++++++++----
 include/linux/dma/mmp-pdma.h |  15 +++
 2 files changed, 263 insertions(+), 26 deletions(-)
 create mode 100644 include/linux/dma/mmp-pdma.h

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 12:40   ` Ezequiel Garcia
  2013-08-07 10:19 ` [PATCH 02/12] dma: mmp_pdma: clear DRCMR when free a phy channel Daniel Mack
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Xiang Wang <wangx@marvell.com>

In mmp pdma, phy channels are allocated/freed dynamically
and frequently, but no proper protection is in place.
Conflicts will happen when multiple users request phy
channels at the same time. Use a spinlock to protect the
alloc/free paths.

Signed-off-by: Xiang Wang <wangx@marvell.com>
---
 drivers/dma/mmp_pdma.c | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f..226158d 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -121,6 +121,7 @@ struct mmp_pdma_device {
 	struct device			*dev;
 	struct dma_device		device;
 	struct mmp_pdma_phy		*phy;
+	spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
 #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -219,6 +220,7 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
 	int prio, i;
 	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
 	struct mmp_pdma_phy *phy;
+	unsigned long flags;
 
 	/*
 	 * dma channel priorities
@@ -227,6 +229,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
 	 * ch 8 - 11, 24 - 27  <--> (2)
 	 * ch 12 - 15, 28 - 31  <--> (3)
 	 */
+
+	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
 		for (i = 0; i < pdev->dma_channels; i++) {
 			if (prio != ((i & 0xf) >> 2))
@@ -234,14 +238,30 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
 			phy = &pdev->phy[i];
 			if (!phy->vchan) {
 				phy->vchan = pchan;
+				spin_unlock_irqrestore(&pdev->phy_lock, flags);
 				return phy;
 			}
 		}
 	}
 
+	spin_unlock_irqrestore(&pdev->phy_lock, flags);
 	return NULL;
 }
 
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
+{
+	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+	unsigned long flags;
+
+	if (!pchan->phy)
+		return;
+
+	spin_lock_irqsave(&pdev->phy_lock, flags);
+	pchan->phy->vchan = NULL;
+	pchan->phy = NULL;
+	spin_unlock_irqrestore(&pdev->phy_lock, flags);
+}
+
 /* desc->tx_list ==> pending list */
 static void append_pending_queue(struct mmp_pdma_chan *chan,
 					struct mmp_pdma_desc_sw *desc)
@@ -277,10 +297,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
 
 	if (list_empty(&chan->chain_pending)) {
 		/* chance to re-fetch phy channel with higher prio */
-		if (chan->phy) {
-			chan->phy->vchan = NULL;
-			chan->phy = NULL;
-		}
+		mmp_pdma_free_phy(chan);
 		dev_dbg(chan->dev, "no pending list\n");
 		return;
 	}
@@ -377,10 +394,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
 		dev_err(chan->dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
-	if (chan->phy) {
-		chan->phy->vchan = NULL;
-		chan->phy = NULL;
-	}
+	mmp_pdma_free_phy(chan);
 	chan->idle = true;
 	chan->dev_addr = 0;
 	return 1;
@@ -411,10 +425,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
 	chan->desc_pool = NULL;
 	chan->idle = true;
 	chan->dev_addr = 0;
-	if (chan->phy) {
-		chan->phy->vchan = NULL;
-		chan->phy = NULL;
-	}
+	mmp_pdma_free_phy(chan);
 	return;
 }
 
@@ -581,10 +592,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		disable_chan(chan->phy);
-		if (chan->phy) {
-			chan->phy->vchan = NULL;
-			chan->phy = NULL;
-		}
+		mmp_pdma_free_phy(chan);
 		spin_lock_irqsave(&chan->desc_lock, flags);
 		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
 		mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -777,6 +785,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 		return -ENOMEM;
 	pdev->dev = &op->dev;
 
+	spin_lock_init(&pdev->phy_lock);
+
 	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
 	if (!iores)
 		return -EINVAL;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 02/12] dma: mmp_pdma: clear DRCMR when free a phy channel
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
  2013-08-07 10:19 ` [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 03/12] dma: mmp_pdma: factor out DRCMR register calculation Daniel Mack
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

From: Xiang Wang <wangx@marvell.com>

In mmp pdma, phy channels are allocated/freed dynamically.
The mapping from DMA request to DMA channel number in DRCMR
should be cleared when a phy channel is freed. Otherwise
conflicts will happen when:
1. A uses channel 2 and frees it when finished, but A's DRCMR
entry still maps to channel 2.
2. B then gets channel 2, so B's DRCMR entry maps to channel 2
as well.
The datasheet states "Do not map two active requests to the
same channel since it produces unpredictable results", and we
can observe exactly that during testing.

Signed-off-by: Xiang Wang <wangx@marvell.com>
---
 drivers/dma/mmp_pdma.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 226158d..2844eaf 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -252,10 +252,16 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
 {
 	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
 	unsigned long flags;
+	u32 reg;
 
 	if (!pchan->phy)
 		return;
 
+	/* clear the channel mapping in DRCMR */
+	reg = pchan->phy->vchan->drcmr;
+	reg = ((reg < 64) ? 0x0100 : 0x1100) + ((reg & 0x3f) << 2);
+	writel(0, pchan->phy->base + reg);
+
 	spin_lock_irqsave(&pdev->phy_lock, flags);
 	pchan->phy->vchan = NULL;
 	pchan->phy = NULL;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 03/12] dma: mmp_pdma: factor out DRCMR register calculation
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
  2013-08-07 10:19 ` [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels Daniel Mack
  2013-08-07 10:19 ` [PATCH 02/12] dma: mmp_pdma: clear DRCMR when free a phy channel Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 04/12] dma: mmp_pdma: fix maximum transfer length Daniel Mack
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

The exact same calculation is done twice, so let's factor it out to a
macro.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 2844eaf..8d6aae3 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -47,6 +47,8 @@
 #define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
 #define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
 
+#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
+				 (((n) & 0x3f) << 2))
 #define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
 #define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
 
@@ -143,8 +145,7 @@ static void enable_chan(struct mmp_pdma_phy *phy)
 	if (!phy->vchan)
 		return;
 
-	reg = phy->vchan->drcmr;
-	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+	reg = DRCMR(phy->vchan->drcmr);
 	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 
 	reg = (phy->idx << 2) + DCSR;
@@ -258,8 +259,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	reg = pchan->phy->vchan->drcmr;
-	reg = ((reg < 64) ? 0x0100 : 0x1100) + ((reg & 0x3f) << 2);
+	reg = DRCMR(pchan->phy->vchan->drcmr);
 	writel(0, pchan->phy->base + reg);
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
-- 
1.8.3.1
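
For readers unfamiliar with the register layout, the arithmetic behind the
new macro can be spelled out with two illustrative request-line numbers
(values chosen arbitrarily, not taken from any real board):

/*
 * DRCMR(5)  = 0x0100 + ((5  & 0x3f) << 2) = 0x0100 + 0x14 = 0x0114
 * DRCMR(70) = 0x1100 + ((70 & 0x3f) << 2) = 0x1100 + 0x18 = 0x1118
 *
 * Request lines 0..63 therefore map to offsets 0x0100..0x01fc, and
 * lines 64 and above to 0x1100 onwards, one 32-bit register per line.
 */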

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 04/12] dma: mmp_pdma: fix maximum transfer length
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (2 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 03/12] dma: mmp_pdma: factor out DRCMR register calculation Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 05/12] dma: mmp_pdma: add filter function Daniel Mack
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

There's no reason for limiting the maximum transfer length to 0x1000.
Take the actual bit mask instead; the PDMA is able to transfer chunks of
up to SZ_8K - 1.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8d6aae3..1fe78d8 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -71,7 +71,7 @@
 #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
 
 #define PDMA_ALIGNMENT		3
-#define PDMA_MAX_DESC_BYTES	0x1000
+#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
 	u32 ddadr;	/* Points to the next descriptor + flags */
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 05/12] dma: mmp_pdma: add filter function
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (3 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 04/12] dma: mmp_pdma: fix maximum transfer length Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider Daniel Mack
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

PXA peripherals need to obtain specific DMA request ids which will
eventually be stored in the DRCMR register.

Currently, clients are expected to store that number inside the slave
config block as slave_id, which is unfortunately incompatible with the
way DMA resources are handled in DT environments.

This patch adds a filter function which stores the filter parameter
passed in by of-dma.c into the channel's drcmr register.

For backward compatibility, cfg->slave_id is still used if set to
a non-zero value.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c       | 21 ++++++++++++++++++++-
 include/linux/dma/mmp-pdma.h | 15 +++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/dma/mmp-pdma.h

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 1fe78d8..60a1410 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -19,6 +19,7 @@
 #include <linux/dmapool.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
 
 #include "dmaengine.h"
 
@@ -633,8 +634,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 			chan->dcmd |= DCMD_BURST32;
 
 		chan->dir = cfg->direction;
-		chan->drcmr = cfg->slave_id;
 		chan->dev_addr = addr;
+		/* FIXME: drivers should be ported over to use the filter
+		 * function. Once that's done, the following two lines can
+		 * be removed.
+		 */
+		if (cfg->slave_id)
+			chan->drcmr = cfg->slave_id;
 		break;
 	default:
 		return -ENOSYS;
@@ -883,6 +889,19 @@ static struct platform_driver mmp_pdma_driver = {
 	.remove		= mmp_pdma_remove,
 };
 
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+		return false;
+
+	c->drcmr = *(unsigned int *) param;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
 module_platform_driver(mmp_pdma_driver);
 
 MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644
index 0000000..2dc9b2b
--- /dev/null
+++ b/include/linux/dma/mmp-pdma.h
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+	return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
-- 
1.8.3.1
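
For context, a minimal sketch of how a non-DT client could request a
channel through the new filter function; the request-line value below is
made up for the example and error handling is omitted:

#include <linux/dmaengine.h>
#include <linux/dma/mmp-pdma.h>

static struct dma_chan *example_request_pdma_chan(void)
{
	dma_cap_mask_t mask;
	unsigned int drcmr = 75;	/* hypothetical DMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* mmp_pdma_filter_fn() stores 'drcmr' in the channel it accepts */
	return dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
}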

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (4 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 05/12] dma: mmp_pdma: add filter function Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 16:12   ` Arnd Bergmann
  2013-08-07 10:19 ` [PATCH 07/12] dma: mmp_pdma: print the number of channels at probe time Daniel Mack
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

This patch makes the mmp_pdma controller able to provide DMA resources
in DT environments.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 60a1410..d60217a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,6 +18,7 @@
 #include <linux/platform_data/mmp_dma.h>
 #include <linux/dmapool.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/of.h>
 #include <linux/dma/mmp-pdma.h>
 
@@ -783,6 +784,10 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
 
+static struct of_dma_filter_info mmp_pdma_info = {
+	.filter_fn = mmp_pdma_filter_fn,
+};
+
 static int mmp_pdma_probe(struct platform_device *op)
 {
 	struct mmp_pdma_device *pdev;
@@ -869,6 +874,19 @@ static int mmp_pdma_probe(struct platform_device *op)
 		return ret;
 	}
 
+	if (op->dev.of_node) {
+		mmp_pdma_info.dma_cap = pdev->device.cap_mask;
+
+		/* Device-tree DMA controller registration */
+		ret = of_dma_controller_register(op->dev.of_node,
+						 of_dma_simple_xlate,
+						 &mmp_pdma_info);
+		if (ret < 0) {
+			dev_err(&op->dev, "of_dma_controller_register failed\n");
+			return ret;
+		}
+	}
+
 	dev_info(pdev->device.dev, "initialized\n");
 	return 0;
 }
-- 
1.8.3.1
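
To show the client side of this, a rough sketch of how a peripheral driver
would obtain its channel once the controller acts as a DT provider; the
node property values in the comment are invented for the example:

#include <linux/device.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	/*
	 * Assumes the client node carries something like:
	 *   dmas = <&pdma 75>;
	 *   dma-names = "rx";
	 * (phandle name and request number are made up here)
	 */
	return dma_request_slave_channel(dev, "rx");
}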

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 07/12] dma: mmp_pdma: print the number of channels at probe time
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (5 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 08/12] dma: mmp_pdma: remove duplicate assignment Daniel Mack
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

That helps check the provided runtime information.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index d60217a..2d48f31 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -887,7 +887,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 		}
 	}
 
-	dev_info(pdev->device.dev, "initialized\n");
+	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
 	return 0;
 }
 
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 08/12] dma: mmp_pdma: remove duplicate assignment
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (6 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 07/12] dma: mmp_pdma: print the number of channels at probe time Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers Daniel Mack
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

The DMA_SLAVE capability is currently set twice.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 2d48f31..1c2c00b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -852,7 +852,6 @@ static int mmp_pdma_probe(struct platform_device *op)
 
 	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
-	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
 	pdev->device.dev = &op->dev;
 	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
 	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (7 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 08/12] dma: mmp_pdma: remove duplicate assignment Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-08  9:04   ` Xiang Wang
  2013-08-07 10:19 ` [PATCH 10/12] dma: mmp_pdma: implement DMA_PAUSE and DMA_RESUME Daniel Mack
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

The PXA DMA controller has a DALGN register which allows for
byte-aligned DMA transfers. Use it in case any of the transfer
descriptors is not aligned to a mask of ~0x7.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 1c2c00b..7eb235b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -109,6 +109,7 @@ struct mmp_pdma_chan {
 	struct list_head chain_pending;	/* Link descriptors queue for pending */
 	struct list_head chain_running;	/* Link descriptors queue for running */
 	bool idle;			/* channel statue machine */
+	bool byte_align;
 
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 };
@@ -142,7 +143,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 
 static void enable_chan(struct mmp_pdma_phy *phy)
 {
-	u32 reg;
+	u32 reg, dalgn;
 
 	if (!phy->vchan)
 		return;
@@ -150,6 +151,13 @@ static void enable_chan(struct mmp_pdma_phy *phy)
 	reg = DRCMR(phy->vchan->drcmr);
 	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 
+	dalgn = readl(phy->base + DALGN);
+	if (phy->vchan->byte_align)
+		dalgn |= 1 << phy->idx;
+	else
+		dalgn &= ~(1 << phy->idx);
+	writel(dalgn, phy->base + DALGN);
+
 	reg = (phy->idx << 2) + DCSR;
 	writel(readl(phy->base + reg) | DCSR_RUN,
 					phy->base + reg);
@@ -453,6 +461,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
 		return NULL;
 
 	chan = to_mmp_pdma_chan(dchan);
+	chan->byte_align = false;
 
 	if (!chan->dir) {
 		chan->dir = DMA_MEM_TO_MEM;
@@ -469,6 +478,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
 		}
 
 		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+		if (dma_src & 0x7 || dma_dst & 0x7)
+			chan->byte_align = true;
 
 		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
 		new->desc.dsadr = dma_src;
@@ -528,12 +539,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	if ((sgl == NULL) || (sg_len == 0))
 		return NULL;
 
+	chan->byte_align = false;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
 		avail = sg_dma_len(sgl);
 
 		do {
 			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+			if (addr & 0x7)
+				chan->byte_align = true;
 
 			/* allocate and populate the descriptor */
 			new = mmp_pdma_alloc_descriptor(chan);
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 10/12] dma: mmp_pdma: implement DMA_PAUSE and DMA_RESUME
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (8 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 11/12] dma: mmp_pdma: add support for residue reporting Daniel Mack
  2013-08-07 10:19 ` [PATCH 12/12] dma: mmp_pdma: add support for cyclic DMA descriptors Daniel Mack
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

This is needed at least for audio operation.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 7eb235b..c56da7c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -658,6 +658,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 		if (cfg->slave_id)
 			chan->drcmr = cfg->slave_id;
 		break;
+	case DMA_PAUSE:
+		disable_chan(chan->phy);
+		break;
+	case DMA_RESUME:
+		start_pending_queue(chan);
+		break;
 	default:
 		return -ENOSYS;
 	}
-- 
1.8.3.1
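
Seen from a client such as an audio driver, the new commands are reached
through the generic dmaengine wrappers; a minimal sketch, illustrative
only:

#include <linux/dmaengine.h>

static void example_audio_trigger(struct dma_chan *chan, bool running)
{
	if (running)
		dmaengine_resume(chan);		/* issues DMA_RESUME */
	else
		dmaengine_pause(chan);		/* issues DMA_PAUSE */
}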

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 11/12] dma: mmp_pdma: add support for residue reporting
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (9 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 10/12] dma: mmp_pdma: implement DMA_PAUSE and DMA_RESUME Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  2013-08-07 10:19 ` [PATCH 12/12] dma: mmp_pdma: add support for cyclic DMA descriptors Daniel Mack
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

In order to report the channel's residue, we have to memorize the first
dma buffer position when the channel is configured.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 39 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c56da7c..0c4a2d5 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -28,8 +28,8 @@
 #define DALGN		0x00a0
 #define DINT		0x00f0
 #define DDADR		0x0200
-#define DSADR		0x0204
-#define DTADR		0x0208
+#define DSADR(n)	(0x0204 + ((n) << 4))
+#define DTADR(n)	(0x0208 + ((n) << 4))
 #define DCMD		0x020c
 
 #define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
@@ -97,6 +97,13 @@ struct mmp_pdma_chan {
 	struct dma_async_tx_descriptor desc;
 	struct mmp_pdma_phy *phy;
 	enum dma_transfer_direction dir;
+	/*
+	 * We memorize the original start address of the first descriptor as
+	 * well as the original total length so we can later determine the
+	 * channel's residue.
+	 */
+	dma_addr_t start_addr;
+	u32 total_len;
 
 	/* channel's basic info */
 	struct tasklet_struct tasklet;
@@ -469,6 +476,13 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
 		chan->dcmd |= DCMD_BURST32;
 	}
 
+	if (chan->dir == DMA_MEM_TO_DEV)
+		chan->start_addr = dma_src;
+	else
+		chan->start_addr = dma_dst;
+
+	chan->total_len = len;
+
 	do {
 		/* Allocate the link descriptor from DMA pool */
 		new = mmp_pdma_alloc_descriptor(chan);
@@ -540,11 +554,17 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 		return NULL;
 
 	chan->byte_align = false;
+	chan->total_len = 0;
 
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
 		avail = sg_dma_len(sgl);
 
+		if (!first)
+			chan->start_addr = addr;
+
+		chan->total_len += avail;
+
 		do {
 			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
 			if (addr & 0x7)
@@ -671,6 +691,20 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 	return ret;
 }
 
+static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan)
+{
+	u32 curr, done;
+
+	if (chan->dir == DMA_DEV_TO_MEM)
+		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+	else
+		curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
+	done = curr - chan->start_addr;
+
+	return chan->total_len - done;
+}
+
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
 			dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
@@ -680,6 +714,7 @@ static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 	ret = dma_cookie_status(dchan, cookie, txstate);
+	txstate->residue = mmp_pdma_residue(chan);
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	return ret;
-- 
1.8.3.1
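
On the client side, the residue reported here becomes visible through
struct dma_tx_state; a minimal sketch of reading it (cookie handling is
illustrative only):

#include <linux/dmaengine.h>

static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);

	/* bytes of the transfer that have not completed yet */
	return state.residue;
}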

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 12/12] dma: mmp_pdma: add support for cyclic DMA descriptors
  2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
                   ` (10 preceding siblings ...)
  2013-08-07 10:19 ` [PATCH 11/12] dma: mmp_pdma: add support for residue reporting Daniel Mack
@ 2013-08-07 10:19 ` Daniel Mack
  11 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 10:19 UTC (permalink / raw)
  To: linux-arm-kernel

Provide a callback to prepare cyclic DMA transfers.
This is for instance needed for audio channel transport.

Signed-off-by: Daniel Mack <zonque@gmail.com>
---
 drivers/dma/mmp_pdma.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 115 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 0c4a2d5..e95a685 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -97,6 +97,8 @@ struct mmp_pdma_chan {
 	struct dma_async_tx_descriptor desc;
 	struct mmp_pdma_phy *phy;
 	enum dma_transfer_direction dir;
+	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
+						 * is in cyclic mode */
 	/*
 	 * We memorize the original start address of the first descriptor as
 	 * well as the original total length so we can later determine the
@@ -530,6 +532,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->cyclic_first = NULL;
+
 	return &first->async_tx;
 
 fail:
@@ -611,6 +615,96 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	new->desc.ddadr = DDADR_STOP;
 	new->desc.dcmd |= DCMD_ENDIRQEN;
 
+	chan->dir = dir;
+	chan->cyclic_first = NULL;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct mmp_pdma_chan *chan;
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+	dma_addr_t dma_src, dma_dst;
+
+	if (!dchan || !len || !period_len)
+		return NULL;
+
+	/* the buffer length must be a multiple of period_len */
+	if (len % period_len != 0)
+		return NULL;
+
+	if (period_len > PDMA_MAX_DESC_BYTES)
+		return NULL;
+
+	chan = to_mmp_pdma_chan(dchan);
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		dma_src = buf_addr;
+		dma_dst = chan->dev_addr;
+		break;
+	case DMA_DEV_TO_MEM:
+		dma_dst = buf_addr;
+		dma_src = chan->dev_addr;
+		break;
+	default:
+		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+		return NULL;
+	}
+
+	chan->start_addr = buf_addr;
+	chan->total_len = len;
+	chan->dir = direction;
+
+	do {
+		/* Allocate the link descriptor from DMA pool */
+		new = mmp_pdma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, "no memory for desc\n");
+			goto fail;
+		}
+
+		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+					(DCMD_LENGTH & period_len);
+		new->desc.dsadr = dma_src;
+		new->desc.dtadr = dma_dst;
+
+		if (!first)
+			first = new;
+		else
+			prev->desc.ddadr = new->async_tx.phys;
+
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= period_len;
+
+		if (chan->dir == DMA_MEM_TO_DEV)
+			dma_src += period_len;
+		else
+			dma_dst += period_len;
+
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	first->async_tx.flags = flags; /* client is in control of this ack */
+	first->async_tx.cookie = -EBUSY;
+
+	/* make the cyclic link */
+	new->desc.ddadr = first->async_tx.phys;
+	chan->cyclic_first = first;
+
 	return &first->async_tx;
 
 fail:
@@ -746,8 +840,25 @@ static void dma_do_tasklet(unsigned long data)
 	LIST_HEAD(chain_cleanup);
 	unsigned long flags;
 
-	/* submit pending list; callback for each desc; free desc */
+	if (chan->cyclic_first) {
+		dma_async_tx_callback cb = NULL;
+		void *cb_data = NULL;
+
+		spin_lock_irqsave(&chan->desc_lock, flags);
+		desc = chan->cyclic_first;
+		cb = desc->async_tx.callback;
+		cb_data = desc->async_tx.callback_param;
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+		start_pending_queue(chan);
 
+		if (cb)
+			cb(cb_data);
+
+		return;
+	}
+
+	/* submit pending list; callback for each desc; free desc */
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/* update the cookie if we have some descriptors to cleanup */
@@ -780,6 +891,7 @@ static void dma_do_tasklet(unsigned long data)
 
 		/* Remove from the list of transactions */
 		list_del(&desc->node);
+
 		/* Run the link descriptor callback function */
 		if (txd->callback)
 			txd->callback(txd->callback_param);
@@ -908,12 +1020,14 @@ static int mmp_pdma_probe(struct platform_device *op)
 
 	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
 	pdev->device.dev = &op->dev;
 	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
 	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
 	pdev->device.device_tx_status = mmp_pdma_tx_status;
 	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
 	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_control = mmp_pdma_control;
 	pdev->device.copy_align = PDMA_ALIGNMENT;
-- 
1.8.3.1
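
For illustration, this is roughly how an audio (or similar) client would
drive the new cyclic support through the generic dmaengine wrapper; buffer
layout and callback are placeholders, not part of this series:

#include <linux/dmaengine.h>

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* invoked once per period */
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}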

^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels
  2013-08-07 10:19 ` [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels Daniel Mack
@ 2013-08-07 12:40   ` Ezequiel Garcia
  2013-08-07 12:42     ` Daniel Mack
  2013-08-08  8:35     ` Daniel Mack
  0 siblings, 2 replies; 26+ messages in thread
From: Ezequiel Garcia @ 2013-08-07 12:40 UTC (permalink / raw)
  To: linux-arm-kernel

Daniel,

It's good to see this effort is moving forward!

I just have a minor nitpick.

On Wed, Aug 07, 2013 at 12:19:19PM +0200, Daniel Mack wrote:
>  
>  #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
> @@ -219,6 +220,7 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>  	int prio, i;
>  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
>  	struct mmp_pdma_phy *phy;
> +	unsigned long flags;
>  
>  	/*
>  	 * dma channel priorities
> @@ -227,6 +229,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>  	 * ch 8 - 11, 24 - 27  <--> (2)
>  	 * ch 12 - 15, 28 - 31  <--> (3)
>  	 */
> +
> +	spin_lock_irqsave(&pdev->phy_lock, flags);
>  	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
>  		for (i = 0; i < pdev->dma_channels; i++) {
>  			if (prio != ((i & 0xf) >> 2))
> @@ -234,14 +238,30 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>  			phy = &pdev->phy[i];
>  			if (!phy->vchan) {
>  				phy->vchan = pchan;
> +				spin_unlock_irqrestore(&pdev->phy_lock, flags);

Isn't it better to goto ...

>  				return phy;
>  			}
>  		}
>  	}
>  

... here?

I think it's generally cleaner and less error-prone.

> +	spin_unlock_irqrestore(&pdev->phy_lock, flags);
>  	return NULL;
>  }
>  

-- 
Ezequiel García, Free Electrons
Embedded Linux, Kernel and Android Engineering
http://free-electrons.com

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels
  2013-08-07 12:40   ` Ezequiel Garcia
@ 2013-08-07 12:42     ` Daniel Mack
  2013-08-08  8:35     ` Daniel Mack
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 12:42 UTC (permalink / raw)
  To: linux-arm-kernel

On 07.08.2013 14:40, Ezequiel Garcia wrote:
> On Wed, Aug 07, 2013 at 12:19:19PM +0200, Daniel Mack wrote:
>>  
>>  #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
>> @@ -219,6 +220,7 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  	int prio, i;
>>  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
>>  	struct mmp_pdma_phy *phy;
>> +	unsigned long flags;
>>  
>>  	/*
>>  	 * dma channel priorities
>> @@ -227,6 +229,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  	 * ch 8 - 11, 24 - 27  <--> (2)
>>  	 * ch 12 - 15, 28 - 31  <--> (3)
>>  	 */
>> +
>> +	spin_lock_irqsave(&pdev->phy_lock, flags);
>>  	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
>>  		for (i = 0; i < pdev->dma_channels; i++) {
>>  			if (prio != ((i & 0xf) >> 2))
>> @@ -234,14 +238,30 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  			phy = &pdev->phy[i];
>>  			if (!phy->vchan) {
>>  				phy->vchan = pchan;
>> +				spin_unlock_irqrestore(&pdev->phy_lock, flags);
> 
> Isn't it better to goto ...
> 
>>  				return phy;
>>  			}
>>  		}
>>  	}
>>  
> 
> ... here?
> 
> I think it's generally cleaner and less error-prone.

It's not my patch, but I can do that, unless Xiang Wang has objections.


Thanks,
Daniel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 10:19 ` [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider Daniel Mack
@ 2013-08-07 16:12   ` Arnd Bergmann
  2013-08-07 16:17     ` Daniel Mack
                       ` (2 more replies)
  0 siblings, 3 replies; 26+ messages in thread
From: Arnd Bergmann @ 2013-08-07 16:12 UTC (permalink / raw)
  To: linux-arm-kernel

On Wednesday 07 August 2013, Daniel Mack wrote:
> This patch makes the mmp_pdma controller able to provide DMA resources
> in DT environments.
> 
> Signed-off-by: Daniel Mack <zonque@gmail.com>
> ---
>  drivers/dma/mmp_pdma.c | 18 ++++++++++++++++++
>  1 file changed, 18 insertions(+)
> 
> diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
> index 60a1410..d60217a 100644
> --- a/drivers/dma/mmp_pdma.c
> +++ b/drivers/dma/mmp_pdma.c
> @@ -18,6 +18,7 @@
>  #include <linux/platform_data/mmp_dma.h>
>  #include <linux/dmapool.h>
>  #include <linux/of_device.h>
> +#include <linux/of_dma.h>
>  #include <linux/of.h>
>  #include <linux/dma/mmp-pdma.h>
>  
> @@ -783,6 +784,10 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
>  };
>  MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
>  
> +static struct of_dma_filter_info mmp_pdma_info = {
> +	.filter_fn = mmp_pdma_filter_fn,
> +};
> +
>  static int mmp_pdma_probe(struct platform_device *op)
>  {
>  	struct mmp_pdma_device *pdev;
> @@ -869,6 +874,19 @@ static int mmp_pdma_probe(struct platform_device *op)
>  		return ret;
>  	}
>  
> +	if (op->dev.of_node) {
> +		mmp_pdma_info.dma_cap = pdev->device.cap_mask;
> +
> +		/* Device-tree DMA controller registration */
> +		ret = of_dma_controller_register(op->dev.of_node,
> +						 of_dma_simple_xlate,
> +						 &mmp_pdma_info);

of_dma_simple_xlate can not be used if there is a chance that multiple instances
of the same dma engine, or multiple different DMA engines are present in the
system. I generally advise against using it.

Please have a look at the changes that Zhangfei Gao proposed in
http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
and see if you can do the same here.

	Arnd
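
For comparison, the shape of such a driver-specific xlate function might
look roughly like the sketch below; mmp_pdma_find_free_chan() is a
hypothetical helper standing in for whatever channel-allocation scheme the
driver ends up with:

static struct dma_chan *mmp_pdma_dt_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	/* pick any free channel... (hypothetical helper) */
	chan = mmp_pdma_find_free_chan(d);
	if (!chan)
		return NULL;

	/* ...and program the DRCMR request line from the phandle argument */
	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}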

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 16:12   ` Arnd Bergmann
@ 2013-08-07 16:17     ` Daniel Mack
  2013-08-07 20:17       ` Arnd Bergmann
  2013-08-08  8:38     ` Daniel Mack
  2013-08-09 13:10     ` Daniel Mack
  2 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-07 16:17 UTC (permalink / raw)
  To: linux-arm-kernel

On 07.08.2013 18:12, Arnd Bergmann wrote:
> On Wednesday 07 August 2013, Daniel Mack wrote:

>> +	if (op->dev.of_node) {
>> +		mmp_pdma_info.dma_cap = pdev->device.cap_mask;
>> +
>> +		/* Device-tree DMA controller registration */
>> +		ret = of_dma_controller_register(op->dev.of_node,
>> +						 of_dma_simple_xlate,
>> +						 &mmp_pdma_info);
> 
> of_dma_simple_xlate can not be used if there is a chance that multiple instances
> of the same dma engine, or multiple different DMA engines are present in the
> system.

Both can't be the case really for PXA, but I see your point.

> Please have a look at the changes that Zhangfei Gao proposed in
> http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
> and see if you can do the same here.

Ok, I can do the same. As I can directly access dma_spec->args[0] from
that context, that approach would also solve the problem with the
hard-coded filter function, right?


Thanks,
Daniel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 16:17     ` Daniel Mack
@ 2013-08-07 20:17       ` Arnd Bergmann
  0 siblings, 0 replies; 26+ messages in thread
From: Arnd Bergmann @ 2013-08-07 20:17 UTC (permalink / raw)
  To: linux-arm-kernel

On Wednesday 07 August 2013, Daniel Mack wrote:

> > Please have a look at the changes that Zhangfei Gao proposed in
> > http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
> > and see if you can do the same here.
> 
> Ok, I can do the same. As I can directly access dma_spec->args[0] from
> that context, that approach would also solve the problem with the
> hard-coded filter function, right?

You mean the problem of using the exported filter function pointer in
other drivers? No, since the filter function is used only for the non-DT
path, while the xlate function is only used for DT.

	Arnd

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels
  2013-08-07 12:40   ` Ezequiel Garcia
  2013-08-07 12:42     ` Daniel Mack
@ 2013-08-08  8:35     ` Daniel Mack
  1 sibling, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-08  8:35 UTC (permalink / raw)
  To: linux-arm-kernel

On 07.08.2013 14:40, Ezequiel Garcia wrote:
> Daniel,
> 
> It's good to see this effort is moving forward!
> 
> I just have a minor nitpick.

I just realized that Vinod already commited this patch to his tree, so I
won't be sending it again.


Daniel

> 
> On Wed, Aug 07, 2013 at 12:19:19PM +0200, Daniel Mack wrote:
>>  
>>  #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
>> @@ -219,6 +220,7 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  	int prio, i;
>>  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
>>  	struct mmp_pdma_phy *phy;
>> +	unsigned long flags;
>>  
>>  	/*
>>  	 * dma channel priorities
>> @@ -227,6 +229,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  	 * ch 8 - 11, 24 - 27  <--> (2)
>>  	 * ch 12 - 15, 28 - 31  <--> (3)
>>  	 */
>> +
>> +	spin_lock_irqsave(&pdev->phy_lock, flags);
>>  	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
>>  		for (i = 0; i < pdev->dma_channels; i++) {
>>  			if (prio != ((i & 0xf) >> 2))
>> @@ -234,14 +238,30 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>>  			phy = &pdev->phy[i];
>>  			if (!phy->vchan) {
>>  				phy->vchan = pchan;
>> +				spin_unlock_irqrestore(&pdev->phy_lock, flags);
> 
> Isn't it better to goto ...
> 
>>  				return phy;
>>  			}
>>  		}
>>  	}
>>  
> 
> ... here?
> 
> I think it's generally cleaner and less error-prone.
> 
>> +	spin_unlock_irqrestore(&pdev->phy_lock, flags);
>>  	return NULL;
>>  }
>>  
> 

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 16:12   ` Arnd Bergmann
  2013-08-07 16:17     ` Daniel Mack
@ 2013-08-08  8:38     ` Daniel Mack
  2013-08-09 13:10     ` Daniel Mack
  2 siblings, 0 replies; 26+ messages in thread
From: Daniel Mack @ 2013-08-08  8:38 UTC (permalink / raw)
  To: linux-arm-kernel

On 07.08.2013 18:12, Arnd Bergmann wrote:
> On Wednesday 07 August 2013, Daniel Mack wrote:

>> @@ -869,6 +874,19 @@ static int mmp_pdma_probe(struct platform_device *op)
>>  		return ret;
>>  	}
>>  
>> +	if (op->dev.of_node) {
>> +		mmp_pdma_info.dma_cap = pdev->device.cap_mask;
>> +
>> +		/* Device-tree DMA controller registration */
>> +		ret = of_dma_controller_register(op->dev.of_node,
>> +						 of_dma_simple_xlate,
>> +						 &mmp_pdma_info);
> 
> of_dma_simple_xlate can not be used if there is a chance that multiple instances
> of the same dma engine, or multiple different DMA engines are present in the
> system. I generally advise against using it.
> 
> Please have a look at the changes that Zhangfei Gao proposed in
> http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
> and see if you can do the same here.

Ok, I'll rebase my series on top of that one, hoping that Zhangfei's
patch will make it upstream before mine.


Daniel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers
  2013-08-07 10:19 ` [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers Daniel Mack
@ 2013-08-08  9:04   ` Xiang Wang
  2013-08-08  9:11     ` Daniel Mack
  0 siblings, 1 reply; 26+ messages in thread
From: Xiang Wang @ 2013-08-08  9:04 UTC (permalink / raw)
  To: linux-arm-kernel

> Subject: [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers
> 
> The PXA DMA controller has a DALGN register which allows for
> byte-aligned DMA transfers. Use it in case any of the transfer
> descriptors is not aligned to a mask of ~0x7.
> 
> Signed-off-by: Daniel Mack <zonque@gmail.com>
> ---
>  drivers/dma/mmp_pdma.c | 17 ++++++++++++++++-
>  1 file changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
> index 1c2c00b..7eb235b 100644
> --- a/drivers/dma/mmp_pdma.c
> +++ b/drivers/dma/mmp_pdma.c
> @@ -109,6 +109,7 @@ struct mmp_pdma_chan {
>  	struct list_head chain_pending;	/* Link descriptors queue for pending */
>  	struct list_head chain_running;	/* Link descriptors queue for running */
>  	bool idle;			/* channel statue machine */
> +	bool byte_align;
> 
>  	struct dma_pool *desc_pool;	/* Descriptors pool */
>  };
> @@ -142,7 +143,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
> 
>  static void enable_chan(struct mmp_pdma_phy *phy)
>  {
> -	u32 reg;
> +	u32 reg, dalgn;
> 
>  	if (!phy->vchan)
>  		return;
> @@ -150,6 +151,13 @@ static void enable_chan(struct mmp_pdma_phy *phy)
>  	reg = DRCMR(phy->vchan->drcmr);
>  	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
> 
> +	dalgn = readl(phy->base + DALGN);
> +	if (phy->vchan->byte_align)
> +		dalgn |= 1 << phy->idx;
> +	else
> +		dalgn &= ~(1 << phy->idx);
> +	writel(dalgn, phy->base + DALGN);
> +
>  	reg = (phy->idx << 2) + DCSR;
>  	writel(readl(phy->base + reg) | DCSR_RUN,
>  					phy->base + reg);
> @@ -453,6 +461,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
>  		return NULL;
> 
>  	chan = to_mmp_pdma_chan(dchan);
> +	chan->byte_align = false;
> 
>  	if (!chan->dir) {
>  		chan->dir = DMA_MEM_TO_MEM;
> @@ -469,6 +478,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
>  		}
> 
>  		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
> +		if (dma_src & 0x7 || dma_dst & 0x7)
> +			chan->byte_align = true;
> 
>  		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
>  		new->desc.dsadr = dma_src;
> @@ -528,12 +539,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
>  	if ((sgl == NULL) || (sg_len == 0))
>  		return NULL;
> 
> +	chan->byte_align = false;
> +
>  	for_each_sg(sgl, sg, sg_len, i) {
>  		addr = sg_dma_address(sg);
>  		avail = sg_dma_len(sgl);
> 
>  		do {
>  			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
> +			if (addr & 0x7)
> +				chan->byte_align = true;
> 
>  			/* allocate and populate the descriptor */
>  			new = mmp_pdma_alloc_descriptor(chan);
> --
> 1.8.3.1
We do need to set the DALGN bit in some of our drivers, but we cannot
configure this via the standard dmaengine API.
In this patch, the DMA address is used to determine whether or not to set
DALGN. But what if we need to use 1-byte-aligned mode even when the
addresses are 8-byte-aligned?
Is it proper to always use 1-byte-aligned mode?
Thanks.

Regards,
Xiang

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers
  2013-08-08  9:04   ` Xiang Wang
@ 2013-08-08  9:11     ` Daniel Mack
  2013-08-16  8:05       ` Xiang Wang
  0 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-08  9:11 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Xiang,

On 08.08.2013 11:04, Xiang Wang wrote:
>> Subject: [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned
>> transfers
>> 
>> The PXA DMA controller has a DALGN register which allows for 
>> byte-aligned DMA transfers. Use it in case any of the transfer 
>> descriptors is not aligned to a mask of ~0x7.

[...]

> We do need to set DALGN bit in some of our drivers.

Which ones, and how do you currently do that? I didn't find any code to
support this yet in mmp-pdma.

> But we cannot
> configure this via standard dma engine API. In this patch, dma
> address is used to determine whether or not to set DALGN. But what if
> we need to use 1-byte-aligned mode when addresses are
> 8-byte-aligned?

Hmm, why would you need that? What's the constraint for this driver that
they have to rely on that?

> Is it proper to always use 1-byte-aligned mode? 

As far as I understand the datasheet, this bit has performance
implications and should only be used if really needed.

I think if you have that constraint in drivers, and the dmaengine
implementation can't determine that automatically, we should introduce a
flag or something in the dma_slave_config struct.


Daniel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-07 16:12   ` Arnd Bergmann
  2013-08-07 16:17     ` Daniel Mack
  2013-08-08  8:38     ` Daniel Mack
@ 2013-08-09 13:10     ` Daniel Mack
  2013-08-09 14:32       ` zhangfei gao
  2 siblings, 1 reply; 26+ messages in thread
From: Daniel Mack @ 2013-08-09 13:10 UTC (permalink / raw)
  To: linux-arm-kernel

On 07.08.2013 18:12, Arnd Bergmann wrote:
> On Wednesday 07 August 2013, Daniel Mack wrote:
>> +	if (op->dev.of_node) {
>> +		mmp_pdma_info.dma_cap = pdev->device.cap_mask;
>> +
>> +		/* Device-tree DMA controller registration */
>> +		ret = of_dma_controller_register(op->dev.of_node,
>> +						 of_dma_simple_xlate,
>> +						 &mmp_pdma_info);
> 
> of_dma_simple_xlate can not be used if there is a chance that multiple instances
> of the same dma engine, or multiple different DMA engines are present in the
> system. I generally advise against using it.
> 
> Please have a look at the changes that Zhangfei Gao proposed in
> http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
> and see if you can do the same here.

I had another look at that and Zhangfei's case is not really applicable
to mine, unfortunately.

In his case, one specific out of many channels has to be used, depending
on the first argument of the phandle. In my case though, the pdma
controller may just take any of its channels, and just assign the
correct DMA request to it.

So if I provide a private xlate function, I need a way to obtain *any*
of the channels in my instance. Open-coding that is not easily possible,
as I need to hold the dmaengine's local dma_list_mutex for that.

I have to dig deeper here, but if anyone has a hint, please let me know :)


Daniel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-09 13:10     ` Daniel Mack
@ 2013-08-09 14:32       ` zhangfei gao
  2013-08-09 21:08         ` Arnd Bergmann
  0 siblings, 1 reply; 26+ messages in thread
From: zhangfei gao @ 2013-08-09 14:32 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Aug 9, 2013 at 9:10 PM, Daniel Mack <zonque@gmail.com> wrote:
> On 07.08.2013 18:12, Arnd Bergmann wrote:
>> On Wednesday 07 August 2013, Daniel Mack wrote:
>>> +    if (op->dev.of_node) {
>>> +            mmp_pdma_info.dma_cap = pdev->device.cap_mask;
>>> +
>>> +            /* Device-tree DMA controller registration */
>>> +            ret = of_dma_controller_register(op->dev.of_node,
>>> +                                             of_dma_simple_xlate,
>>> +                                             &mmp_pdma_info);
>>
>> of_dma_simple_xlate can not be used if there is a chance that multiple instances
>> of the same dma engine, or multiple different DMA engines are present in the
>> system. I generally advise against using it.
>>
>> Please have a look at the changes that Zhangfei Gao proposed in
>> http://comments.gmane.org/gmane.linux.ports.arm.kernel/249077
>> and see if you can do the same here.
>
> I had another look at that and Zhangfei's case is not really applicable
> to mine, unfortunately.
>
> In his case, one specific out of many channels has to be used, depending
> on the first argument of the phandle. In my case though, the pdma
> controller may just take any of its channels, and just assign the
> correct DMA request to it.

Dear Daniel

Though any physical channel will work, the same is not true for the
virtual channel: each device has to set a specific request line.

pdma.c
chan->drcmr = cfg->slave_id;
nand.c
  conf.slave_id = info->drcmr_dat;

The specific virtual channel can be specified directly by the request
line, while pdma.c chooses a free physical channel internally, which is
transparent to the client.

It should be the same.

Thanks

>
> So if I provide a private xlate function, I need a way to obtain *any*
> of the channels in my instance. Open-coding that is not easily possible,
> as I need to hold the dmaengine's local dma_list_mutex for that.
>
> I have to dig deeper here, but if anyone has a hint, please let me know :)
>
>
> Daniel
>
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel at lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider
  2013-08-09 14:32       ` zhangfei gao
@ 2013-08-09 21:08         ` Arnd Bergmann
  0 siblings, 0 replies; 26+ messages in thread
From: Arnd Bergmann @ 2013-08-09 21:08 UTC (permalink / raw)
  To: linux-arm-kernel

On Friday 09 August 2013, zhangfei gao wrote:
> On Fri, Aug 9, 2013 at 9:10 PM, Daniel Mack <zonque@gmail.com> wrote:
> > On 07.08.2013 18:12, Arnd Bergmann wrote:

> >
> > I had another look at that and Zhangfei's case is not really applicable
> > to mine, unfortunately.
> >
> > In his case, one specific out of many channels has to be used, depending
> > on the first argument of the phandle. In my case though, the pdma
> > controller may just take any of its channels, and just assign the
> > correct DMA request to it.

This should still be fine since your driver can keep track of which
channels are currently in use. The locking can be done inside of the
dmaengine core, but you might have to retry if the channel is getting
acquired between your lookup and the lock.

> Though any physical channel is workable, the virtual channel does not.
> Each device has to set specific request line.
> 
> pdma.c
> chan->drcmr = cfg->slave_id;
> nand.c
>   conf.slave_id = info->drcmr_dat;
> 
> The specific virtual channel can be directly specificied by request line.
> While pdma.c choose the free physical channel inside, which is
> transparent to client.

It is a bug to override the slave_id value from the dma slave driver when
using dma_request_slave_channel, and the dmaengine driver should not allow
that. The slave_config function should only be used to set the configuration
parts that the slave driver knows about, which does not include the
slave_id in this case (since there is no platform data).

	Arnd

^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers
  2013-08-08  9:11     ` Daniel Mack
@ 2013-08-16  8:05       ` Xiang Wang
  0 siblings, 0 replies; 26+ messages in thread
From: Xiang Wang @ 2013-08-16  8:05 UTC (permalink / raw)
  To: linux-arm-kernel

On 08/08/2013 05:11 PM, Daniel Mack wrote:
> Hi Xiang,
>
> On 08.08.2013 11:04, Xiang Wang wrote:
>>> Subject: [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned
>>> transfers
>>>
>>> The PXA DMA controller has a DALGN register which allows for
>>> byte-aligned DMA transfers. Use it in case any of the transfer
>>> descriptors is not aligned to a mask of ~0x7.
>
> [...]
>
>> We do need to set DALGN bit in some of our drivers.
>
> Which ones, and how do you currently do that? I didn't find any code to
> support this yet in mmp-pdma.
Hi, Daniel
When we use DMA in the UART driver and let DMA handle the UART trailing
bytes, we should set the DALGN bit. Otherwise, the DMA controller cannot
move the bytes from the UART FIFO to memory correctly because they are
not 8-byte-aligned.
>
>> But we cannot
>> configure this via standard dma engine API. In this patch, dma
>> address is used to determine whether or not to set DALGN. But what if
>> we need to use 1-byte-aligned mode when addresses are
>> 8-byte-aligned?
>
> Hmm, why would you need that? What's the constraint for this driver that
> they have to rely on that?
>
>> Is it proper to always use 1-byte-aligned mode?
>
> As far as I understand the datasheet, this bit has performance
> implications and should only be used if really needed.
>
> I think if you have that constraint in drivers, and the dmaengine
> implementation can't determine that automatically, we should introduce a
> flag or something in the dma_slave_config struct.
>
>
> Daniel
>


-- 
Regards,
Xiang

^ permalink raw reply	[flat|nested] 26+ messages in thread

Thread overview: 26+ messages
2013-08-07 10:19 [PATCH 00/12] dma: pdma: some patches needed for PXA DT environments Daniel Mack
2013-08-07 10:19 ` [PATCH 01/12] dma: mmp_pdma: add protect when alloc/free phy channels Daniel Mack
2013-08-07 12:40   ` Ezequiel Garcia
2013-08-07 12:42     ` Daniel Mack
2013-08-08  8:35     ` Daniel Mack
2013-08-07 10:19 ` [PATCH 02/12] dma: mmp_pdma: clear DRCMR when free a phy channel Daniel Mack
2013-08-07 10:19 ` [PATCH 03/12] dma: mmp_pdma: factor out DRCMR register calculation Daniel Mack
2013-08-07 10:19 ` [PATCH 04/12] dma: mmp_pdma: fix maximum transfer length Daniel Mack
2013-08-07 10:19 ` [PATCH 05/12] dma: mmp_pdma: add filter function Daniel Mack
2013-08-07 10:19 ` [PATCH 06/12] dma: mmp_pdma: make the controller a DMA provider Daniel Mack
2013-08-07 16:12   ` Arnd Bergmann
2013-08-07 16:17     ` Daniel Mack
2013-08-07 20:17       ` Arnd Bergmann
2013-08-08  8:38     ` Daniel Mack
2013-08-09 13:10     ` Daniel Mack
2013-08-09 14:32       ` zhangfei gao
2013-08-09 21:08         ` Arnd Bergmann
2013-08-07 10:19 ` [PATCH 07/12] dma: mmp_pdma: print the number of channels at probe time Daniel Mack
2013-08-07 10:19 ` [PATCH 08/12] dma: mmp_pdma: remove duplicate assignment Daniel Mack
2013-08-07 10:19 ` [PATCH 09/12] dma: mmp_pdma: add support for byte-aligned transfers Daniel Mack
2013-08-08  9:04   ` Xiang Wang
2013-08-08  9:11     ` Daniel Mack
2013-08-16  8:05       ` Xiang Wang
2013-08-07 10:19 ` [PATCH 10/12] dma: mmp_pdma: implement DMA_PAUSE and DMA_RESUME Daniel Mack
2013-08-07 10:19 ` [PATCH 11/12] dma: mmp_pdma: add support for residue reporting Daniel Mack
2013-08-07 10:19 ` [PATCH 12/12] dma: mmp_pdma: add support for cyclic DMA descriptors Daniel Mack
