* [CFT] DMA engine patches
From: Russell King - ARM Linux @ 2012-06-07 10:34 UTC
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

I am intending to post the _entire_ set of DMA patches I have so far.
I'm going to be doing this in a slightly different way to normal,
because of the way the branches are structured.

What will come first is a set of three patches common to all the
branches.  Following on from that will be the individual sets for
sa11x0, pl08x and OMAP.

These are for testing, and are based on v3.5-rc1.

 drivers/dma/Kconfig      |    4 +
 drivers/dma/Makefile     |    1 +
 drivers/dma/sa11x0-dma.c |  249 ++++++++++++++-------------------------------
 drivers/dma/virt-dma.c   |  123 +++++++++++++++++++++++
 drivers/dma/virt-dma.h   |  152 ++++++++++++++++++++++++++++
 5 files changed, 358 insertions(+), 171 deletions(-)


* [CFT 1/3] dmaengine: split out virtual channel DMA support from sa11x0 driver
From: Russell King @ 2012-06-07 10:40 UTC
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Split the virtual slave channel DMA support from the sa11x0 driver so
this code can be shared with other slave DMA engine drivers.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/Kconfig      |    4 +
 drivers/dma/Makefile     |    1 +
 drivers/dma/sa11x0-dma.c |  249 ++++++++++++++-------------------------------
 drivers/dma/virt-dma.c   |   99 ++++++++++++++++++
 drivers/dma/virt-dma.h   |  138 +++++++++++++++++++++++++
 5 files changed, 320 insertions(+), 171 deletions(-)
 create mode 100644 drivers/dma/virt-dma.c
 create mode 100644 drivers/dma/virt-dma.h
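
As a usage illustration (not part of this patch), here is a minimal,
hypothetical driver skeleton showing how the new helpers are intended
to be consumed.  The foo_* names and the "program the hardware" steps
are invented; only the virt-dma calls are real:

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct foo_desc {
        struct virt_dma_desc    vd;     /* embedded virt-dma descriptor */
        size_t                  len;
};

struct foo_chan {
        struct virt_dma_chan    vc;     /* embeds struct dma_chan */
        struct foo_desc         *cur;   /* descriptor on the hardware */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
{
        return container_of(chan, struct foo_chan, vc.chan);
}

/* Called from the vchan tasklet (or free_chan_resources) to free a desc */
static void foo_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct foo_desc, vd));
}

static struct dma_async_tx_descriptor *foo_prep_slave_sg(struct dma_chan *chan,
        struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct foo_chan *c = to_foo_chan(chan);
        struct foo_desc *d = kzalloc(sizeof(*d), GFP_ATOMIC);

        if (!d)
                return NULL;

        /* ... translate sgl into hardware form, set d->len ... */

        /* hook up ->tx_submit and return the embedded async_tx descriptor */
        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

/* Must be called with c->vc.lock held */
static void foo_start_next(struct foo_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!vd) {
                c->cur = NULL;
                return;
        }

        list_del(&vd->node);
        c->cur = container_of(vd, struct foo_desc, vd);
        /* ... write c->cur to the hardware and start it ... */
}

static void foo_issue_pending(struct dma_chan *chan)
{
        struct foo_chan *c = to_foo_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* move submitted descriptors to the issued list, kick the hardware */
        if (vchan_issue_pending(&c->vc) && !c->cur)
                foo_start_next(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

In probe(), each channel would set c->vc.desc_free = foo_desc_free and
call vchan_init(&c->vc, dmadev); on the completion interrupt, with
c->vc.lock held, the driver calls vchan_cookie_complete(&c->cur->vd)
and starts the next descriptor.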

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aadeb5b..eb2b60e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -255,6 +255,7 @@ config DMA_SA11X0
 	tristate "SA-11x0 DMA support"
 	depends on ARCH_SA1100
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the DMA engine found on Intel StrongARM SA-1100 and
 	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
@@ -263,6 +264,9 @@ config DMA_SA11X0
 config DMA_ENGINE
 	bool
 
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 86b795b..fc05f7d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
 ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78cce..5f1d2e6 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "virt-dma.h"
+
 #define NR_PHY_CHAN	6
 #define DMA_ALIGN	3
 #define DMA_MAX_SIZE	0x1fff
@@ -72,12 +74,11 @@ struct sa11x0_dma_sg {
 };
 
 struct sa11x0_dma_desc {
-	struct dma_async_tx_descriptor tx;
+	struct virt_dma_desc	vd;
+
 	u32			ddar;
 	size_t			size;
 
-	/* maybe protected by c->lock */
-	struct list_head	node;
 	unsigned		sglen;
 	struct sa11x0_dma_sg	sg[0];
 };
@@ -85,15 +86,11 @@ struct sa11x0_dma_desc {
 struct sa11x0_dma_phy;
 
 struct sa11x0_dma_chan {
-	struct dma_chan		chan;
-	spinlock_t		lock;
-	dma_cookie_t		lc;
+	struct virt_dma_chan	vc;
 
-	/* protected by c->lock */
+	/* protected by c->vc.lock */
 	struct sa11x0_dma_phy	*phy;
 	enum dma_status		status;
-	struct list_head	desc_submitted;
-	struct list_head	desc_issued;
 
 	/* protected by d->lock */
 	struct list_head	node;
@@ -109,7 +106,7 @@ struct sa11x0_dma_phy {
 
 	struct sa11x0_dma_chan	*vchan;
 
-	/* Protected by c->lock */
+	/* Protected by c->vc.lock */
 	unsigned		sg_load;
 	struct sa11x0_dma_desc	*txd_load;
 	unsigned		sg_done;
@@ -127,13 +124,12 @@ struct sa11x0_dma_dev {
 	spinlock_t		lock;
 	struct tasklet_struct	task;
 	struct list_head	chan_pending;
-	struct list_head	desc_complete;
 	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
 };
 
 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sa11x0_dma_chan, chan);
+	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
 }
 
 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +137,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
 	return container_of(dmadev, struct sa11x0_dma_dev, slave);
 }
 
-static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
 {
-	return container_of(tx, struct sa11x0_dma_desc, tx);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
 }
 
-static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
 {
-	if (list_empty(&c->desc_issued))
-		return NULL;
-
-	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
 }
 
 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
 {
-	list_del(&txd->node);
+	list_del(&txd->vd.node);
 	p->txd_load = txd;
 	p->sg_load = 0;
 
 	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
-		p->num, txd, txd->tx.cookie, txd->ddar);
+		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
 }
 
 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -229,21 +224,13 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 	struct sa11x0_dma_desc *txd = p->txd_done;
 
 	if (++p->sg_done == txd->sglen) {
-		struct sa11x0_dma_dev *d = p->dev;
-
-		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
-			p->num, p->txd_done, p->txd_done->tx.cookie);
-
-		c->lc = txd->tx.cookie;
-
-		spin_lock(&d->lock);
-		list_add_tail(&txd->node, &d->desc_complete);
-		spin_unlock(&d->lock);
+		vchan_cookie_complete(&txd->vd);
 
 		p->sg_done = 0;
 		p->txd_done = p->txd_load;
 
-		tasklet_schedule(&d->task);
+		if (!p->txd_done)
+			tasklet_schedule(&p->dev->task);
 	}
 
 	sa11x0_dma_start_sg(p, c);
@@ -280,7 +267,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 	if (c) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&c->lock, flags);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		/*
 		 * Now that we're holding the lock, check that the vchan
 		 * really is associated with this pchan before touching the
@@ -294,7 +281,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 			if (dcsr & DCSR_DONEB)
 				sa11x0_dma_complete(p, c);
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 	}
 
 	return IRQ_HANDLED;
@@ -332,28 +319,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_chan *c;
-	struct sa11x0_dma_desc *txd, *txn;
-	LIST_HEAD(head);
 	unsigned pch, pch_alloc = 0;
 
 	dev_dbg(d->slave.dev, "tasklet enter\n");
 
-	/* Get the completed tx descriptors */
-	spin_lock_irq(&d->lock);
-	list_splice_init(&d->desc_complete, &head);
-	spin_unlock_irq(&d->lock);
-
-	list_for_each_entry(txd, &head, node) {
-		c = to_sa11x0_dma_chan(txd->tx.chan);
-
-		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
-			c, txd, txd->tx.cookie);
-
-		spin_lock_irq(&c->lock);
+	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
+		spin_lock_irq(&c->vc.lock);
 		p = c->phy;
-		if (p) {
-			if (!p->txd_done)
-				sa11x0_dma_start_txd(c);
+		if (p && !p->txd_done) {
+			sa11x0_dma_start_txd(c);
 			if (!p->txd_done) {
 				/* No current txd associated with this channel */
 				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +337,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 				p->vchan = NULL;
 			}
 		}
-		spin_unlock_irq(&c->lock);
+		spin_unlock_irq(&c->vc.lock);
 	}
 
 	spin_lock_irq(&d->lock);
@@ -380,7 +354,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			/* Mark this channel allocated */
 			p->vchan = c;
 
-			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
 		}
 	}
 	spin_unlock_irq(&d->lock);
@@ -390,42 +364,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			p = &d->phy[pch];
 			c = p->vchan;
 
-			spin_lock_irq(&c->lock);
+			spin_lock_irq(&c->vc.lock);
 			c->phy = p;
 
 			sa11x0_dma_start_txd(c);
-			spin_unlock_irq(&c->lock);
+			spin_unlock_irq(&c->vc.lock);
 		}
 	}
 
-	/* Now free the completed tx descriptor, and call their callbacks */
-	list_for_each_entry_safe(txd, txn, &head, node) {
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
-
-		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
-			txd, txd->tx.cookie);
-
-		kfree(txd);
-
-		if (callback)
-			callback(callback_param);
-	}
-
 	dev_dbg(d->slave.dev, "tasklet exit\n");
 }
 
 
-static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
-{
-	struct sa11x0_dma_desc *txd, *txn;
-
-	list_for_each_entry_safe(txd, txn, head, node) {
-		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
-		kfree(txd);
-	}
-}
-
 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	return 0;
@@ -436,18 +386,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	unsigned long flags;
-	LIST_HEAD(head);
 
-	spin_lock_irqsave(&c->lock, flags);
-	spin_lock(&d->lock);
+	spin_lock_irqsave(&d->lock, flags);
 	list_del_init(&c->node);
-	spin_unlock(&d->lock);
-
-	list_splice_tail_init(&c->desc_submitted, &head);
-	list_splice_tail_init(&c->desc_issued, &head);
-	spin_unlock_irqrestore(&c->lock, flags);
+	spin_unlock_irqrestore(&d->lock, flags);
 
-	sa11x0_dma_desc_free(d, &head);
+	vchan_free_chan_resources(&c->vc);
 }
 
 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -473,21 +417,15 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_desc *txd;
-	dma_cookie_t last_used, last_complete;
 	unsigned long flags;
 	enum dma_status ret;
 	size_t bytes = 0;
 
-	last_used = c->chan.cookie;
-	last_complete = c->lc;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS) {
-		dma_set_tx_state(state, last_complete, last_used, 0);
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
 
-	spin_lock_irqsave(&c->lock, flags);
+	spin_lock_irqsave(&c->vc.lock, flags);
 	p = c->phy;
 	ret = c->status;
 	if (p) {
@@ -524,12 +462,13 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 		if (txd != p->txd_load && p->txd_load)
 			bytes += p->txd_load->size;
 	}
-	list_for_each_entry(txd, &c->desc_issued, node) {
+	list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
 		bytes += txd->size;
 	}
-	spin_unlock_irqrestore(&c->lock, flags);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
 
-	dma_set_tx_state(state, last_complete, last_used, bytes);
+	if (state)
+		state->residue = bytes;
 
 	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
 
@@ -547,40 +486,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	unsigned long flags;
 
-	spin_lock_irqsave(&c->lock, flags);
-	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
-	if (!list_empty(&c->desc_issued)) {
-		spin_lock(&d->lock);
-		if (!c->phy && list_empty(&c->node)) {
-			list_add_tail(&c->node, &d->chan_pending);
-			tasklet_schedule(&d->task);
-			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc)) {
+		if (!c->phy) {
+			spin_lock(&d->lock);
+			if (list_empty(&c->node)) {
+				list_add_tail(&c->node, &d->chan_pending);
+				tasklet_schedule(&d->task);
+				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+			}
+			spin_unlock(&d->lock);
 		}
-		spin_unlock(&d->lock);
 	} else
-		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
-	spin_unlock_irqrestore(&c->lock, flags);
-}
-
-static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
-	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->lock, flags);
-	c->chan.cookie += 1;
-	if (c->chan.cookie < 0)
-		c->chan.cookie = 1;
-	txd->tx.cookie = c->chan.cookie;
-
-	list_add_tail(&txd->node, &c->desc_submitted);
-	spin_unlock_irqrestore(&c->lock, flags);
-
-	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
-		c, txd, txd->tx.cookie);
-
-	return txd->tx.cookie;
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
 }
 
 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +515,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 	/* SA11x0 channels can only operate in their native direction */
 	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
 		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
-			c, c->ddar, dir);
+			&c->vc, c->ddar, dir);
 		return NULL;
 	}
 
@@ -612,14 +531,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
 		if (addr & DMA_ALIGN) {
 			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
-				c, addr);
+				&c->vc, addr);
 			return NULL;
 		}
 	}
 
 	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
 	if (!txd) {
-		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 		return NULL;
 	}
 
@@ -655,17 +574,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 		} while (len);
 	}
 
-	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
-	txd->tx.flags = flags;
-	txd->tx.tx_submit = sa11x0_dma_tx_submit;
 	txd->ddar = c->ddar;
 	txd->size = size;
 	txd->sglen = j;
 
 	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
-		c, txd, txd->size, txd->sglen);
+		&c->vc, &txd->vd, txd->size, txd->sglen);
 
-	return &txd->tx;
+	return vchan_tx_prep(&c->vc, &txd->vd, flags);
 }
 
 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +611,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
 	if (maxburst == 8)
 		ddar |= DDAR_BS;
 
-	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
-		c, addr, width, maxburst);
+	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+		&c->vc, addr, width, maxburst);
 
 	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
 
@@ -718,16 +634,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
 
 	case DMA_TERMINATE_ALL:
-		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 		/* Clear the tx descriptor lists */
-		spin_lock_irqsave(&c->lock, flags);
-		list_splice_tail_init(&c->desc_submitted, &head);
-		list_splice_tail_init(&c->desc_issued, &head);
+		spin_lock_irqsave(&c->vc.lock, flags);
+		vchan_get_all_descriptors(&c->vc, &head);
 
 		p = c->phy;
 		if (p) {
-			struct sa11x0_dma_desc *txd, *txn;
-
 			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
 			/* vchan is assigned to a pchan - stop the channel */
 			writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +648,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				DCSR_STRTB | DCSR_DONEB,
 				p->base + DMA_DCSR_C);
 
-			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
-				if (txd->tx.chan == &c->chan)
-					list_move(&txd->node, &head);
-
 			if (p->txd_load) {
 				if (p->txd_load != p->txd_done)
-					list_add_tail(&p->txd_load->node, &head);
+					list_add_tail(&p->txd_load->vd.node, &head);
 				p->txd_load = NULL;
 			}
 			if (p->txd_done) {
-				list_add_tail(&p->txd_done->node, &head);
+				list_add_tail(&p->txd_done->vd.node, &head);
 				p->txd_done = NULL;
 			}
 			c->phy = NULL;
@@ -754,14 +663,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			spin_unlock(&d->lock);
 			tasklet_schedule(&d->task);
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
-		sa11x0_dma_desc_free(d, &head);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+		vchan_dma_desc_free_list(&c->vc, &head);
 		ret = 0;
 		break;
 
 	case DMA_PAUSE:
-		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
-		spin_lock_irqsave(&c->lock, flags);
+		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		if (c->status == DMA_IN_PROGRESS) {
 			c->status = DMA_PAUSED;
 
@@ -774,26 +683,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				spin_unlock(&d->lock);
 			}
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 		ret = 0;
 		break;
 
 	case DMA_RESUME:
-		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
-		spin_lock_irqsave(&c->lock, flags);
+		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		if (c->status == DMA_PAUSED) {
 			c->status = DMA_IN_PROGRESS;
 
 			p = c->phy;
 			if (p) {
 				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
-			} else if (!list_empty(&c->desc_issued)) {
+			} else if (!list_empty(&c->vc.desc_issued)) {
 				spin_lock(&d->lock);
 				list_add_tail(&c->node, &d->chan_pending);
 				spin_unlock(&d->lock);
 			}
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 		ret = 0;
 		break;
 
@@ -853,15 +762,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 			return -ENOMEM;
 		}
 
-		c->chan.device = dmadev;
 		c->status = DMA_IN_PROGRESS;
 		c->ddar = chan_desc[i].ddar;
 		c->name = chan_desc[i].name;
-		spin_lock_init(&c->lock);
-		INIT_LIST_HEAD(&c->desc_submitted);
-		INIT_LIST_HEAD(&c->desc_issued);
 		INIT_LIST_HEAD(&c->node);
-		list_add_tail(&c->chan.device_node, &dmadev->channels);
+
+		c->vc.desc_free = sa11x0_dma_free_desc;
+		vchan_init(&c->vc, dmadev);
 	}
 
 	return dma_async_device_register(dmadev);
@@ -890,8 +797,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
 {
 	struct sa11x0_dma_chan *c, *cn;
 
-	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
-		list_del(&c->chan.device_node);
+	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
 		kfree(c);
 	}
 }
@@ -915,7 +823,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
 
 	spin_lock_init(&d->lock);
 	INIT_LIST_HEAD(&d->chan_pending);
-	INIT_LIST_HEAD(&d->desc_complete);
 
 	d->base = ioremap(res->start, resource_size(res));
 	if (!d->base) {
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
new file mode 100644
index 0000000..bd85b05
--- /dev/null
+++ b/drivers/dma/virt-dma.c
@@ -0,0 +1,99 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct virt_dma_desc, tx);
+}
+
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+	struct virt_dma_desc *vd = to_virt_desc(tx);
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	cookie = dma_cookie_assign(tx);
+
+	list_add_tail(&vd->node, &vc->desc_submitted);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
+		vc, vd, cookie);
+
+	return cookie;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void vchan_complete(unsigned long arg)
+{
+	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+	LIST_HEAD(head);
+
+	spin_lock_irq(&vc->lock);
+	list_splice_tail_init(&vc->desc_completed, &head);
+	spin_unlock_irq(&vc->lock);
+
+	while (!list_empty(&head)) {
+		struct virt_dma_desc *vd = list_first_entry(&head,
+				struct virt_dma_desc, node);
+		dma_async_tx_callback cb = vd->tx.callback;
+		void *cb_data = vd->tx.callback_param;
+
+		list_del(&vd->node);
+
+		vc->desc_free(vd);
+
+		if (cb)
+			cb(cb_data);
+	}
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
+{
+	while (!list_empty(head)) {
+		struct virt_dma_desc *vd = list_first_entry(head,
+			struct virt_dma_desc, node);
+		list_del(&vd->node);
+		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+		vc->desc_free(vd);
+	}
+}
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
+
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
+{
+	dma_cookie_init(&vc->chan);
+
+	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_submitted);
+	INIT_LIST_HEAD(&vc->desc_issued);
+	INIT_LIST_HEAD(&vc->desc_completed);
+
+	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
+
+	vc->chan.device = dmadev;
+	list_add_tail(&vc->chan.device_node, &dmadev->channels);
+}
+EXPORT_SYMBOL_GPL(vchan_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
new file mode 100644
index 0000000..825bb96
--- /dev/null
+++ b/drivers/dma/virt-dma.h
@@ -0,0 +1,138 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+	struct dma_async_tx_descriptor tx;
+	/* protected by vc.lock */
+	struct list_head node;
+};
+
+struct virt_dma_chan {
+	struct dma_chan	chan;
+	struct tasklet_struct task;
+	void (*desc_free)(struct virt_dma_desc *);
+
+	spinlock_t lock;
+
+	/* protected by vc.lock */
+	struct list_head desc_submitted;
+	struct list_head desc_issued;
+	struct list_head desc_completed;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * vc: virtual channel allocating this descriptor
+ * vd: virtual descriptor to prepare
+ * tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+	struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+
+	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+	vd->tx.flags = tx_flags;
+	vd->tx.tx_submit = vchan_tx_submit;
+
+	return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+	return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+	dma_cookie_complete(&vd->tx);
+	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+		vd, vd->tx.cookie);
+	list_add_tail(&vd->node, &vc->desc_completed);
+
+	tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+	if (list_empty(&vc->desc_issued))
+		return NULL;
+
+	return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * vc: virtual channel to get descriptors from
+ * head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+	struct list_head *head)
+{
+	list_splice_tail_init(&vc->desc_submitted, head);
+	list_splice_tail_init(&vc->desc_issued, head);
+	list_splice_tail_init(&vc->desc_completed, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&vc->lock, flags);
+	vchan_get_all_descriptors(vc, &head);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
-- 
1.7.4.4


* [CFT 2/3] dmaengine: virt-dma: vchan_find_desc()
From: Russell King @ 2012-06-07 10:41 UTC
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Add a function to look up a descriptor by cookie on a virtual DMA
channel's issued list.  This is needed for tx_status() functionality.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/virt-dma.c |   13 +++++++++++++
 drivers/dma/virt-dma.h |    2 +-
 2 files changed, 14 insertions(+), 1 deletions(-)
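
As an illustration (not part of this patch), a hypothetical tx_status()
implementation using the new helper to report residue for a descriptor
which is still sitting on the issued list.  foo_chan/foo_desc are the
invented types from the sketch accompanying patch 1/3:

static enum dma_status foo_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct foo_chan *c = to_foo_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;
        size_t bytes = 0;

        /* fast path: the cookie has already completed */
        ret = dma_cookie_status(chan, cookie, state);
        if (ret == DMA_SUCCESS)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                /* still on the issued list: nothing transferred yet */
                bytes = container_of(vd, struct foo_desc, vd)->len;
        } else {
                /* otherwise the descriptor is on the hardware (or already
                 * completed); read the residue from the controller here */
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        if (state)
                state->residue = bytes;

        return ret;
}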

diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index bd85b05..a8054fc 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -39,6 +39,19 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(vchan_tx_submit);
 
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
+	dma_cookie_t cookie)
+{
+	struct virt_dma_desc *vd;
+
+	list_for_each_entry(vd, &vc->desc_issued, node)
+		if (vd->tx.cookie == cookie)
+			return vd;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_find_desc);
+
 /*
  * This tasklet handles the completion of a DMA descriptor by
  * calling its callback and freeing it.
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 825bb96..44ec57e 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -40,8 +40,8 @@ static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
 }
 
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
-
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
 
 /**
  * vchan_tx_prep - prepare a descriptor
-- 
1.7.4.4


* [CFT 3/3] dmaengine: virt-dma: add support for cyclic DMA periodic callbacks
  2012-06-07 10:34 ` Russell King - ARM Linux
@ 2012-06-07 10:41   ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:41 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Add support for cyclic DMA's periodic callbacks.  Drivers are expected
to call vchan_cyclic_callback() when a period has completed, which will
schedule the tasklet to make the callback into the driver.

As callbacks are made from tasklet context, it is important to realise
that a callback is not guaranteed for each completed period: a single
callback may cover N completed periods, where N may be greater than one.
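
As a hedged illustration only (mirroring what the sa11x0 cyclic patch
later in this series does), a driver's period-completion path might look
like the following; "foo_chan", "foo_desc" and "cur_desc" are placeholder
names for the driver's own bookkeeping, not part of this patch:

	struct foo_desc {
		struct virt_dma_desc vd;
	};

	struct foo_chan {
		struct virt_dma_chan vc;
		struct foo_desc *cur_desc;
	};

	/* driver IRQ/completion path: one period of the cyclic buffer done */
	static void foo_dma_period_done(struct foo_chan *fc)
	{
		/*
		 * Schedules the vchan tasklet; the client's callback may
		 * then run once for several completed periods.
		 */
		vchan_cyclic_callback(&fc->cur_desc->vd);
	}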

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/virt-dma.c |   19 +++++++++++++++----
 drivers/dma/virt-dma.h |   14 ++++++++++++++
 2 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index a8054fc..6f80432 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -59,17 +59,28 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
 static void vchan_complete(unsigned long arg)
 {
 	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+	struct virt_dma_desc *vd;
+	dma_async_tx_callback cb = NULL;
+	void *cb_data = NULL;
 	LIST_HEAD(head);
 
 	spin_lock_irq(&vc->lock);
 	list_splice_tail_init(&vc->desc_completed, &head);
+	vd = vc->cyclic;
+	if (vd) {
+		vc->cyclic = NULL;
+		cb = vd->tx.callback;
+		cb_data = vd->tx.callback_param;
+	}
 	spin_unlock_irq(&vc->lock);
 
+	if (cb)
+		cb(cb_data);
+
 	while (!list_empty(&head)) {
-		struct virt_dma_desc *vd = list_first_entry(&head,
-				struct virt_dma_desc, node);
-		dma_async_tx_callback cb = vd->tx.callback;
-		void *cb_data = vd->tx.callback_param;
+		vd = list_first_entry(&head, struct virt_dma_desc, node);
+		cb = vd->tx.callback;
+		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
 
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 44ec57e..85c19d6 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -32,6 +32,8 @@ struct virt_dma_chan {
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
+
+	struct virt_dma_desc *cyclic;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -92,6 +94,18 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 }
 
 /**
+ * vchan_cyclic_callback - report the completion of a period
+ * vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+	vc->cyclic = vd;
+	tasklet_schedule(&vc->task);
+}
+
+/**
  * vchan_next_desc - peek at the next descriptor to be processed
  * vc: virtual channel to obtain descriptor from
  *
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT] SA11x0 patches
  2012-06-07 10:41   ` Russell King
@ 2012-06-07 10:42     ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-07 10:42 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Vinod Koul, Dan Williams

Updates to the SA11x0 DMA engine driver.  This fixes the residue
calculation and implements cyclic transfers.

 drivers/dma/sa11x0-dma.c |  153 ++++++++++++++++++++++++++++++++++++----------
 1 files changed, 121 insertions(+), 32 deletions(-)


^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 1/2] dmaengine: sa11x0-dma: fix DMA residue support
  2012-06-07 10:42     ` Russell King - ARM Linux
@ 2012-06-07 10:43       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:43 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

The semantics now implemented are:

- If the cookie has completed successfully, the residue will be zero.
- If the cookie is in progress or the channel is paused, it will be the
  number of bytes yet to be transferred. [*]
- If the cookie is queued, it will be the number of bytes in the
  descriptor.

* - where this is the number of bytes yet to be transferred to/from
  RAM.
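
For reference, a hedged sketch of how a dmaengine client would observe
these semantics (variable names are illustrative only; the residue field
is as defined by struct dma_tx_state):

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_SUCCESS) {
		/* completed: the residue is guaranteed to be zero */
	} else {
		/* in progress, paused or still queued: bytes remaining */
		pr_debug("residue: %u bytes\n", state.residue);
	}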

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/sa11x0-dma.c |   45 +++++++++++++++++++++++++++++----------------
 1 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5f1d2e6..db4fcbd 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -416,27 +416,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	struct sa11x0_dma_phy *p;
-	struct sa11x0_dma_desc *txd;
+	struct virt_dma_desc *vd;
 	unsigned long flags;
 	enum dma_status ret;
-	size_t bytes = 0;
 
 	ret = dma_cookie_status(&c->vc.chan, cookie, state);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
+	if (!state)
+		return c->status;
+
 	spin_lock_irqsave(&c->vc.lock, flags);
 	p = c->phy;
-	ret = c->status;
-	if (p) {
-		dma_addr_t addr = sa11x0_dma_pos(p);
 
-		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+	/*
+	 * If the cookie is on our issue queue, then the residue is
+	 * its total size.
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
+	} else if (!p) {
+		state->residue = 0;
+	} else {
+		struct sa11x0_dma_desc *txd;
+		size_t bytes = 0;
+
+		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
+			txd = p->txd_done;
+		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
+			txd = p->txd_load;
+		else
+			txd = NULL;
 
-		txd = p->txd_done;
+		ret = c->status;
 		if (txd) {
+			dma_addr_t addr = sa11x0_dma_pos(p);
 			unsigned i;
 
+			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
 			for (i = 0; i < txd->sglen; i++) {
 				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
 					i, txd->sg[i].addr, txd->sg[i].len);
@@ -459,18 +479,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 				bytes += txd->sg[i].len;
 			}
 		}
-		if (txd != p->txd_load && p->txd_load)
-			bytes += p->txd_load->size;
-	}
-	list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
-		bytes += txd->size;
+		state->residue = bytes;
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
-	if (state)
-		state->residue = bytes;
-
-	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
 
 	return ret;
 }
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 2/2] dmaengine: sa11x0-dma: add cyclic DMA support
  2012-06-07 10:42     ` Russell King - ARM Linux
@ 2012-06-07 10:43       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:43 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Add support for cyclic DMA on sa11x0 platforms.  This follows the
discussed behaviour that the callback will be called at some point
after a period expires, and that multiple period expiries may be
coalesced into one callback (due to the tasklet behaviour).
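
For illustration, a hedged sketch of how a client might set up such a
transfer against the device_prep_dma_cyclic operation this patch adds;
"buf_dma", "foo_period_elapsed" and "foo" are invented names, and the
buffer/period sizes are examples only:

	struct dma_async_tx_descriptor *txd;

	/* e.g. a 16KiB ring of four 4KiB periods, device to memory */
	txd = chan->device->device_prep_dma_cyclic(chan, buf_dma,
						    4 * 4096, 4096,
						    DMA_DEV_TO_MEM, NULL);
	if (txd) {
		/* one callback may cover several elapsed periods */
		txd->callback = foo_period_elapsed;
		txd->callback_param = foo;
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
	}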

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/sa11x0-dma.c |  108 +++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 92 insertions(+), 16 deletions(-)

diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index db4fcbd..f5a7360 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,6 +78,8 @@ struct sa11x0_dma_desc {
 
 	u32			ddar;
 	size_t			size;
+	unsigned		period;
+	bool			cyclic;
 
 	unsigned		sglen;
 	struct sa11x0_dma_sg	sg[0];
@@ -178,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
 		return;
 
 	if (p->sg_load == txd->sglen) {
-		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+		if (!txd->cyclic) {
+			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
 
-		/*
-		 * We have reached the end of the current descriptor.
-		 * Peek at the next descriptor, and if compatible with
-		 * the current, start processing it.
-		 */
-		if (txn && txn->ddar == txd->ddar) {
-			txd = txn;
-			sa11x0_dma_start_desc(p, txn);
+			/*
+			 * We have reached the end of the current descriptor.
+			 * Peek at the next descriptor, and if compatible with
+			 * the current, start processing it.
+			 */
+			if (txn && txn->ddar == txd->ddar) {
+				txd = txn;
+				sa11x0_dma_start_desc(p, txn);
+			} else {
+				p->txd_load = NULL;
+				return;
+			}
 		} else {
-			p->txd_load = NULL;
-			return;
+			/* Cyclic: reset back to beginning */
+			p->sg_load = 0;
 		}
 	}
 
@@ -224,13 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 	struct sa11x0_dma_desc *txd = p->txd_done;
 
 	if (++p->sg_done == txd->sglen) {
-		vchan_cookie_complete(&txd->vd);
+		if (!txd->cyclic) {
+			vchan_cookie_complete(&txd->vd);
 
-		p->sg_done = 0;
-		p->txd_done = p->txd_load;
+			p->sg_done = 0;
+			p->txd_done = p->txd_load;
+
+			if (!p->txd_done)
+				tasklet_schedule(&p->dev->task);
+		} else {
+			if ((p->sg_done % txd->period) == 0)
+				vchan_cyclic_callback(&txd->vd);
 
-		if (!p->txd_done)
-			tasklet_schedule(&p->dev->task);
+			/* Cyclic: reset back to beginning */
+			p->sg_done = 0;
+		}
 	}
 
 	sa11x0_dma_start_sg(p, c);
@@ -597,6 +612,65 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &txd->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+	enum dma_transfer_direction dir, void *context)
+{
+	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+	struct sa11x0_dma_desc *txd;
+	unsigned i, j, k, sglen, sgperiod;
+
+	/* SA11x0 channels can only operate in their native direction */
+	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+			&c->vc, c->ddar, dir);
+		return NULL;
+	}
+
+	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
+	sglen = size * sgperiod / period;
+
+	/* Do not allow zero-sized txds */
+	if (sglen == 0)
+		return NULL;
+
+	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+	if (!txd) {
+		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
+		return NULL;
+	}
+
+	for (i = k = 0; i < size / period; i++) {
+		size_t tlen, len = period;
+
+		for (j = 0; j < sgperiod; j++, k++) {
+			tlen = len;
+
+			if (tlen > DMA_MAX_SIZE) {
+				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
+				tlen = (tlen / mult) & ~DMA_ALIGN;
+			}
+
+			txd->sg[k].addr = addr;
+			txd->sg[k].len = tlen;
+			addr += tlen;
+			len -= tlen;
+		}
+
+		WARN_ON(len != 0);
+	}
+
+	WARN_ON(k != sglen);
+
+	txd->ddar = c->ddar;
+	txd->size = size;
+	txd->sglen = sglen;
+	txd->cyclic = 1;
+	txd->period = sgperiod;
+
+	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
 {
 	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
@@ -867,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
 	}
 
 	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
 	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
 	if (ret) {
 		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* Re: [CFT] PL08x patches
  2012-06-07 10:41   ` Russell King
@ 2012-06-07 10:45     ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-07 10:45 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Vinod Koul, Dan Williams

Here are the PL08x patches.

 drivers/dma/Kconfig        |    1 +
 drivers/dma/amba-pl08x.c   |  941 ++++++++++++++++++++++----------------------
 include/linux/amba/pl08x.h |  156 +-------
 3 files changed, 482 insertions(+), 616 deletions(-)


^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 01/31] dmaengine: PL08x: remove runtime PM support
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:46       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:46 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

The runtime PM support conflicts with the generic AMBA bus PM, and also
causes a potential deadlock with the PL011 driver as it results in
interrupts being enabled beneath a spinlock.

I don't presently see any solution to this other than removing the
runtime PM support entirely from the DMA engine driver.  Alternative
suggestions are welcome.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   10 ----------
 1 files changed, 0 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb..5586d9a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -404,7 +404,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		return NULL;
 	}
 
-	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }
 
@@ -418,8 +417,6 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 	/* Stop the channel and clear its interrupts */
 	pl08x_terminate_phy_chan(pl08x, ch);
 
-	pm_runtime_put(&pl08x->adev->dev);
-
 	/* Mark it as free */
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
@@ -1851,9 +1848,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		goto out_no_pl08x;
 	}
 
-	pm_runtime_set_active(&adev->dev);
-	pm_runtime_enable(&adev->dev);
-
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -2007,7 +2001,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		 amba_part(adev), amba_rev(adev),
 		 (unsigned long long)adev->res.start, adev->irq[0]);
 
-	pm_runtime_put(&adev->dev);
 	return 0;
 
 out_no_slave_reg:
@@ -2026,9 +2019,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	dma_pool_destroy(pl08x->pool);
 out_no_lli_pool:
 out_no_platdata:
-	pm_runtime_put(&adev->dev);
-	pm_runtime_disable(&adev->dev);
-
 	kfree(pl08x);
 out_no_pl08x:
 	amba_release_regions(adev);
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 02/31] dmaengine: PL08x: fix missed dma_transfer_direction fixup
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:46       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:46 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

db8196df4 (dmaengine: move drivers to dma_transfer_direction) missed
fixing up the "DMA_NONE" case.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5586d9a..f3ab004 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1287,7 +1287,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
 
-	txd->direction = DMA_NONE;
+	txd->direction = DMA_MEM_TO_MEM;
 	dsg->src_addr = src;
 	dsg->dst_addr = dest;
 	dsg->len = len;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 03/31] dmaengine: PL08x: remove redundant spinlock
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:46       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:46 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

The pl08x_driver_data spinlock is only ever initialized.  Nothing else
uses it.  Let's get rid of it.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    3 ---
 1 files changed, 0 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index f3ab004..9c5bae6 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -146,7 +146,6 @@ struct pl08x_driver_data {
 	int pool_ctr;
 	u8 lli_buses;
 	u8 mem_buses;
-	spinlock_t lock;
 };
 
 /*
@@ -1897,8 +1896,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		goto out_no_lli_pool;
 	}
 
-	spin_lock_init(&pl08x->lock);
-
 	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
 	if (!pl08x->base) {
 		ret = -ENOMEM;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 04/31] dmaengine: PL08x: remove circular_buffer boolean from channel data
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:47       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:47 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Circular buffers are not handled in this way; there is now a separate
API call to set up circular (cyclic) buffers.  So let's not mislead
people with this bool.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |    7 -------
 include/linux/amba/pl08x.h |    4 ----
 2 files changed, 0 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9c5bae6..5821125 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1701,13 +1701,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 				return -ENOMEM;
 			}
 		}
-		if (chan->cd->circular_buffer) {
-			dev_err(&pl08x->adev->dev,
-				"channel %s: circular buffers not supported\n",
-				chan->name);
-			kfree(chan);
-			continue;
-		}
 		dev_dbg(&pl08x->adev->dev,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 0254901..0f5b34d 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -51,9 +51,6 @@ enum {
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
  * this.
- * @circular_buffer: whether the buffer passed in is circular and
- * shall simply be looped round round (like a record baby round
- * round round round)
  * @single: the device connected to this channel will request single DMA
  * transfers, not bursts. (Bursts are default.)
  * @periph_buses: the device connected to this channel is accessible via
@@ -66,7 +63,6 @@ struct pl08x_channel_data {
 	u32 muxval;
 	u32 cctl;
 	dma_addr_t addr;
-	bool circular_buffer;
 	bool single;
 	u8 periph_buses;
 };
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 05/31] dmaengine: PL08x: clean up get_signal/put_signal
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:47       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:47 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Try to avoid dereferencing the DMA engine's channel struct in these
platform helpers; instead, pass a pointer to the channel data into
get_signal(), and the returned signal number to put_signal().
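
As a hedged sketch (not taken from any real platform), board code
implementing the new hooks might look like this; the mux helpers and the
use of the channel data's min_signal field are illustrative assumptions:

	static int board_get_signal(const struct pl08x_channel_data *cd)
	{
		/* route this channel's request line; muxing is board specific */
		board_dma_mux_enable(cd->min_signal);
		return cd->min_signal;
	}

	static void board_put_signal(const struct pl08x_channel_data *cd,
				     int signal)
	{
		board_dma_mux_disable(signal);
	}

	static struct pl08x_platform_data board_pl08x_pd = {
		/* slave_channels etc. omitted */
		.get_signal	= board_get_signal,
		.put_signal	= board_put_signal,
	};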

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |    4 ++--
 include/linux/amba/pl08x.h |    4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5821125..cc08c8c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -874,7 +874,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * Can the platform allow us to use this channel?
 	 */
 	if (plchan->slave && pl08x->pd->get_signal) {
-		ret = pl08x->pd->get_signal(plchan);
+		ret = pl08x->pd->get_signal(plchan->cd);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
 				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
@@ -909,7 +909,7 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 	struct pl08x_driver_data *pl08x = plchan->host;
 
 	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
-		pl08x->pd->put_signal(plchan);
+		pl08x->pd->put_signal(plchan->cd, plchan->phychan->signal);
 		plchan->phychan->signal = -1;
 	}
 	pl08x_put_phy_channel(pl08x, plchan->phychan);
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 0f5b34d..88765a6 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -225,8 +225,8 @@ struct pl08x_platform_data {
 	const struct pl08x_channel_data *slave_channels;
 	unsigned int num_slave_channels;
 	struct pl08x_channel_data memcpy_channel;
-	int (*get_signal)(struct pl08x_dma_chan *);
-	void (*put_signal)(struct pl08x_dma_chan *);
+	int (*get_signal)(const struct pl08x_channel_data *);
+	void (*put_signal)(const struct pl08x_channel_data *, int);
 	u8 lli_buses;
 	u8 mem_buses;
 };
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 06/31] dmaengine: PL08x: move private data structures into amba-pl08x.c
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:47       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:47 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Move the driver private data structures into the driver itself, rather
than having them exposed to everyone in a header file.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |  136 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/amba/pl08x.h |  141 +-------------------------------------------
 2 files changed, 138 insertions(+), 139 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index cc08c8c..9494990 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -90,6 +90,7 @@
 #define DRIVER_NAME	"pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
 
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +120,141 @@ struct pl08x_lli {
 };
 
 /**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	int signal;
+	struct pl08x_dma_chan *serving;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @dsg_list: list of children sg's
+ * @direction: direction of transfer
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ */
+struct pl08x_txd {
+	struct dma_async_tx_descriptor tx;
+	struct list_head node;
+	struct list_head dsg_list;
+	enum dma_transfer_direction direction;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd.  Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @chan: wrappped abstract channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
+ * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @runtime_direction: current direction of this channel according to
+ * runtime config
+ * @pend_list: queued transactions pending on this channel
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
+ */
+struct pl08x_dma_chan {
+	struct dma_chan chan;
+	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
+	struct tasklet_struct tasklet;
+	char *name;
+	const struct pl08x_channel_data *cd;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 src_cctl;
+	u32 dst_cctl;
+	enum dma_transfer_direction runtime_direction;
+	struct list_head pend_list;
+	struct pl08x_txd *at;
+	spinlock_t lock;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	bool device_fc;
+	struct pl08x_txd *waiting;
+};
+
+/**
  * struct pl08x_driver_data - the local state holder for the PL08x
  * @slave: slave engine for this instance
  * @memcpy: memcpy engine for this instance
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 88765a6..48d02bf 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 
-struct pl08x_lli;
 struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
 
 /* Bitmasks for selecting AHB ports for DMA transfers */
 enum {
@@ -68,144 +69,6 @@ struct pl08x_channel_data {
 };
 
 /**
- * Struct pl08x_bus_data - information of source or destination
- * busses for a transfer
- * @addr: current address
- * @maxwidth: the maximum width of a transfer on this bus
- * @buswidth: the width of this bus in bytes: 1, 2 or 4
- */
-struct pl08x_bus_data {
-	dma_addr_t addr;
-	u8 maxwidth;
-	u8 buswidth;
-};
-
-/**
- * struct pl08x_phy_chan - holder for the physical channels
- * @id: physical index to this channel
- * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this physical channel
- * right now
- * @serving: the virtual channel currently being served by this physical
- * channel
- * @locked: channel unavailable for the system, e.g. dedicated to secure
- * world
- */
-struct pl08x_phy_chan {
-	unsigned int id;
-	void __iomem *base;
-	spinlock_t lock;
-	int signal;
-	struct pl08x_dma_chan *serving;
-	bool locked;
-};
-
-/**
- * struct pl08x_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct pl08x_sg {
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	size_t len;
-	struct list_head node;
-};
-
-/**
- * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
- * @node: node for txd list for channels
- * @dsg_list: list of children sg's
- * @direction: direction of transfer
- * @llis_bus: DMA memory address (physical) start for the LLIs
- * @llis_va: virtual memory address start for the LLIs
- * @cctl: control reg values for current txd
- * @ccfg: config reg values for current txd
- */
-struct pl08x_txd {
-	struct dma_async_tx_descriptor tx;
-	struct list_head node;
-	struct list_head dsg_list;
-	enum dma_transfer_direction direction;
-	dma_addr_t llis_bus;
-	struct pl08x_lli *llis_va;
-	/* Default cctl value for LLIs */
-	u32 cctl;
-	/*
-	 * Settings to be put into the physical channel when we
-	 * trigger this txd.  Other registers are in llis_va[0].
-	 */
-	u32 ccfg;
-};
-
-/**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
- * states
- * @PL08X_CHAN_IDLE: the channel is idle
- * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
- * channel, but the transfer is currently paused
- * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum pl08x_dma_chan_state {
-	PL08X_CHAN_IDLE,
-	PL08X_CHAN_RUNNING,
-	PL08X_CHAN_PAUSED,
-	PL08X_CHAN_WAITING,
-};
-
-/**
- * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
- * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
- * @name: name of channel
- * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
- * @pend_list: queued transactions pending on this channel
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, paused, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
- * @waiting: a TX descriptor on this channel which is waiting for a physical
- * channel to become available
- */
-struct pl08x_dma_chan {
-	struct dma_chan chan;
-	struct pl08x_phy_chan *phychan;
-	int phychan_hold;
-	struct tasklet_struct tasklet;
-	char *name;
-	const struct pl08x_channel_data *cd;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	u32 src_cctl;
-	u32 dst_cctl;
-	enum dma_transfer_direction runtime_direction;
-	struct list_head pend_list;
-	struct pl08x_txd *at;
-	spinlock_t lock;
-	struct pl08x_driver_data *host;
-	enum pl08x_dma_chan_state state;
-	bool slave;
-	bool device_fc;
-	struct pl08x_txd *waiting;
-};
-
-/**
  * struct pl08x_platform_data - the platform configuration for the PL08x
  * PrimeCells.
  * @slave_channels: the channels defined for the different devices on the
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 06/31] dmaengine: PL08x: move private data structures into amba-pl08x.c
@ 2012-06-07 10:47       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:47 UTC (permalink / raw)
  To: linux-arm-kernel

Move the driver private data structures into the driver itself, rather
than having them exposed to everyone in a header file.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |  136 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/amba/pl08x.h |  141 +-------------------------------------------
 2 files changed, 138 insertions(+), 139 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index cc08c8c..9494990 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -90,6 +90,7 @@
 #define DRIVER_NAME	"pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
 
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +120,141 @@ struct pl08x_lli {
 };
 
 /**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	int signal;
+	struct pl08x_dma_chan *serving;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @dsg_list: list of children sg's
+ * @direction: direction of transfer
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ */
+struct pl08x_txd {
+	struct dma_async_tx_descriptor tx;
+	struct list_head node;
+	struct list_head dsg_list;
+	enum dma_transfer_direction direction;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd.  Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @chan: wrappped abstract channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
+ * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @runtime_direction: current direction of this channel according to
+ * runtime config
+ * @pend_list: queued transactions pending on this channel
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
+ */
+struct pl08x_dma_chan {
+	struct dma_chan chan;
+	struct pl08x_phy_chan *phychan;
+	int phychan_hold;
+	struct tasklet_struct tasklet;
+	char *name;
+	const struct pl08x_channel_data *cd;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 src_cctl;
+	u32 dst_cctl;
+	enum dma_transfer_direction runtime_direction;
+	struct list_head pend_list;
+	struct pl08x_txd *at;
+	spinlock_t lock;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	bool device_fc;
+	struct pl08x_txd *waiting;
+};
+
+/**
  * struct pl08x_driver_data - the local state holder for the PL08x
  * @slave: slave engine for this instance
  * @memcpy: memcpy engine for this instance
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 88765a6..48d02bf 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 
-struct pl08x_lli;
 struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
 
 /* Bitmasks for selecting AHB ports for DMA transfers */
 enum {
@@ -68,144 +69,6 @@ struct pl08x_channel_data {
 };
 
 /**
- * Struct pl08x_bus_data - information of source or destination
- * busses for a transfer
- * @addr: current address
- * @maxwidth: the maximum width of a transfer on this bus
- * @buswidth: the width of this bus in bytes: 1, 2 or 4
- */
-struct pl08x_bus_data {
-	dma_addr_t addr;
-	u8 maxwidth;
-	u8 buswidth;
-};
-
-/**
- * struct pl08x_phy_chan - holder for the physical channels
- * @id: physical index to this channel
- * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this physical channel
- * right now
- * @serving: the virtual channel currently being served by this physical
- * channel
- * @locked: channel unavailable for the system, e.g. dedicated to secure
- * world
- */
-struct pl08x_phy_chan {
-	unsigned int id;
-	void __iomem *base;
-	spinlock_t lock;
-	int signal;
-	struct pl08x_dma_chan *serving;
-	bool locked;
-};
-
-/**
- * struct pl08x_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct pl08x_sg {
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	size_t len;
-	struct list_head node;
-};
-
-/**
- * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
- * @node: node for txd list for channels
- * @dsg_list: list of children sg's
- * @direction: direction of transfer
- * @llis_bus: DMA memory address (physical) start for the LLIs
- * @llis_va: virtual memory address start for the LLIs
- * @cctl: control reg values for current txd
- * @ccfg: config reg values for current txd
- */
-struct pl08x_txd {
-	struct dma_async_tx_descriptor tx;
-	struct list_head node;
-	struct list_head dsg_list;
-	enum dma_transfer_direction direction;
-	dma_addr_t llis_bus;
-	struct pl08x_lli *llis_va;
-	/* Default cctl value for LLIs */
-	u32 cctl;
-	/*
-	 * Settings to be put into the physical channel when we
-	 * trigger this txd.  Other registers are in llis_va[0].
-	 */
-	u32 ccfg;
-};
-
-/**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
- * states
- * @PL08X_CHAN_IDLE: the channel is idle
- * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
- * channel, but the transfer is currently paused
- * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum pl08x_dma_chan_state {
-	PL08X_CHAN_IDLE,
-	PL08X_CHAN_RUNNING,
-	PL08X_CHAN_PAUSED,
-	PL08X_CHAN_WAITING,
-};
-
-/**
- * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
- * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
- * @name: name of channel
- * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
- * @pend_list: queued transactions pending on this channel
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, paused, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
- * @waiting: a TX descriptor on this channel which is waiting for a physical
- * channel to become available
- */
-struct pl08x_dma_chan {
-	struct dma_chan chan;
-	struct pl08x_phy_chan *phychan;
-	int phychan_hold;
-	struct tasklet_struct tasklet;
-	char *name;
-	const struct pl08x_channel_data *cd;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
-	u32 src_cctl;
-	u32 dst_cctl;
-	enum dma_transfer_direction runtime_direction;
-	struct list_head pend_list;
-	struct pl08x_txd *at;
-	spinlock_t lock;
-	struct pl08x_driver_data *host;
-	enum pl08x_dma_chan_state state;
-	bool slave;
-	bool device_fc;
-	struct pl08x_txd *waiting;
-};
-
-/**
  * struct pl08x_platform_data - the platform configuration for the PL08x
  * PrimeCells.
  * @slave_channels: the channels defined for the different devices on the
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 07/31] dmaengine: PL08x: constify channel names and bus_id strings
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:48       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |    2 +-
 include/linux/amba/pl08x.h |    2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9494990..775efef 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -237,7 +237,7 @@ struct pl08x_dma_chan {
 	struct pl08x_phy_chan *phychan;
 	int phychan_hold;
 	struct tasklet_struct tasklet;
-	char *name;
+	const char *name;
 	const struct pl08x_channel_data *cd;
 	dma_addr_t src_addr;
 	dma_addr_t dst_addr;
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 48d02bf..158ce26 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -58,7 +58,7 @@ enum {
  * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
-	char *bus_id;
+	const char *bus_id;
 	int min_signal;
 	int max_signal;
 	u32 muxval;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 07/31] dmaengine: PL08x: constify channel names and bus_id strings
@ 2012-06-07 10:48       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |    2 +-
 include/linux/amba/pl08x.h |    2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9494990..775efef 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -237,7 +237,7 @@ struct pl08x_dma_chan {
 	struct pl08x_phy_chan *phychan;
 	int phychan_hold;
 	struct tasklet_struct tasklet;
-	char *name;
+	const char *name;
 	const struct pl08x_channel_data *cd;
 	dma_addr_t src_addr;
 	dma_addr_t dst_addr;
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 48d02bf..158ce26 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -58,7 +58,7 @@ enum {
  * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
-	char *bus_id;
+	const char *bus_id;
 	int min_signal;
 	int max_signal;
 	u32 muxval;
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 08/31] dmaengine: PL08x: get src/dst addr direct from dma_slave_config struct
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:48       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Add a dma_slave_config struct to struct pl08x_dma_chan, and move the
src_addr/dst_addr arguments into this struct.  This is a step away
from using the dma_slave_config's direction member.
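
For reference, a client driver supplies these addresses through the
standard dmaengine call; a minimal sketch (the FIFO address and widths
are illustrative only):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,	/* peripheral FIFO, example value */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	dmaengine_slave_config(chan, &cfg);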

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   15 +++++++--------
 1 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 775efef..31447db 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -239,8 +239,7 @@ struct pl08x_dma_chan {
 	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
+	struct dma_slave_config cfg;
 	u32 src_cctl;
 	u32 dst_cctl;
 	enum dma_transfer_direction runtime_direction;
@@ -1245,6 +1244,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
+	plchan->cfg = *config;
+
 	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
 	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
 
@@ -1263,12 +1264,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	plchan->device_fc = config->device_fc;
 
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_addr = config->src_addr;
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
 					 pl08x->mem_buses);
 	} else {
-		plchan->dst_addr = config->dst_addr;
 		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
 			pl08x_select_bus(pl08x->mem_buses,
 					 plchan->cd->periph_buses);
@@ -1482,10 +1481,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	if (direction == DMA_MEM_TO_DEV) {
 		txd->cctl = plchan->dst_cctl;
-		slave_addr = plchan->dst_addr;
+		slave_addr = plchan->cfg.dst_addr;
 	} else if (direction == DMA_DEV_TO_MEM) {
 		txd->cctl = plchan->src_cctl;
-		slave_addr = plchan->src_addr;
+		slave_addr = plchan->cfg.src_addr;
 	} else {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1790,8 +1789,8 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 
 	chan->slave = true;
 	chan->name = chan->cd->bus_id;
-	chan->src_addr = chan->cd->addr;
-	chan->dst_addr = chan->cd->addr;
+	chan->cfg.src_addr = chan->cd->addr;
+	chan->cfg.dst_addr = chan->cd->addr;
 	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
 		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
 	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 08/31] dmaengine: PL08x: get src/dst addr direct from dma_slave_config struct
@ 2012-06-07 10:48       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel

Add a dma_slave_config struct to struct pl08x_dma_chan, and move the
src_addr/dst_addr arguments into this struct.  This is a step away
from using the dma_slave_config's direction member.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   15 +++++++--------
 1 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 775efef..31447db 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -239,8 +239,7 @@ struct pl08x_dma_chan {
 	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
-	dma_addr_t src_addr;
-	dma_addr_t dst_addr;
+	struct dma_slave_config cfg;
 	u32 src_cctl;
 	u32 dst_cctl;
 	enum dma_transfer_direction runtime_direction;
@@ -1245,6 +1244,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
+	plchan->cfg = *config;
+
 	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
 	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
 
@@ -1263,12 +1264,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	plchan->device_fc = config->device_fc;
 
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_addr = config->src_addr;
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
 					 pl08x->mem_buses);
 	} else {
-		plchan->dst_addr = config->dst_addr;
 		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
 			pl08x_select_bus(pl08x->mem_buses,
 					 plchan->cd->periph_buses);
@@ -1482,10 +1481,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	if (direction == DMA_MEM_TO_DEV) {
 		txd->cctl = plchan->dst_cctl;
-		slave_addr = plchan->dst_addr;
+		slave_addr = plchan->cfg.dst_addr;
 	} else if (direction == DMA_DEV_TO_MEM) {
 		txd->cctl = plchan->src_cctl;
-		slave_addr = plchan->src_addr;
+		slave_addr = plchan->cfg.src_addr;
 	} else {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1790,8 +1789,8 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 
 	chan->slave = true;
 	chan->name = chan->cd->bus_id;
-	chan->src_addr = chan->cd->addr;
-	chan->dst_addr = chan->cd->addr;
+	chan->cfg.src_addr = chan->cd->addr;
+	chan->cfg.dst_addr = chan->cd->addr;
 	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
 		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
 	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 09/31] dmaengine: PL08x: get rid of device_fc in struct pl08x_dma_chan
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:48       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

As we now store the dma_slave_config in pl08x_dma_chan, we don't need
to store device_fc separately.  Use the device_fc flag in
dma_slave_config directly.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    8 +-------
 1 files changed, 1 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 31447db..7eb0e8e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -226,9 +226,6 @@ enum pl08x_dma_chan_state {
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
  * @waiting: a TX descriptor on this channel which is waiting for a physical
  * channel to become available
  */
@@ -249,7 +246,6 @@ struct pl08x_dma_chan {
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
-	bool device_fc;
 	struct pl08x_txd *waiting;
 };
 
@@ -1261,8 +1257,6 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-	plchan->device_fc = config->device_fc;
-
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
@@ -1492,7 +1486,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (plchan->device_fc)
+	if (plchan->cfg.device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 09/31] dmaengine: PL08x: get rid of device_fc in struct pl08x_dma_chan
@ 2012-06-07 10:48       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:48 UTC (permalink / raw)
  To: linux-arm-kernel

As we now store the dma_slave_config in pl08x_dma_chan, we don't need
to store device_fc separately.  Use the device_fc flag in
dma_slave_config directly.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    8 +-------
 1 files changed, 1 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 31447db..7eb0e8e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -226,9 +226,6 @@ enum pl08x_dma_chan_state {
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
  * @waiting: a TX descriptor on this channel which is waiting for a physical
  * channel to become available
  */
@@ -249,7 +246,6 @@ struct pl08x_dma_chan {
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
-	bool device_fc;
 	struct pl08x_txd *waiting;
 };
 
@@ -1261,8 +1257,6 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-	plchan->device_fc = config->device_fc;
-
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
@@ -1492,7 +1486,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (plchan->device_fc)
+	if (plchan->cfg.device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 10/31] dmaengine: PL08x: move the bus and increment selection to dma prepare function
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:49       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Move the bus and transfer increment selection to the DMA prepare
function rather than the slave configuration function.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   26 ++++++++++++++------------
 1 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 7eb0e8e..bd51a44 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1258,13 +1258,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
-			pl08x_select_bus(plchan->cd->periph_buses,
-					 pl08x->mem_buses);
+		plchan->src_cctl = pl08x_cctl(cctl);
 	} else {
-		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
-			pl08x_select_bus(pl08x->mem_buses,
-					 plchan->cd->periph_buses);
+		plchan->dst_cctl = pl08x_cctl(cctl);
 	}
 
 	dev_dbg(&pl08x->adev->dev,
@@ -1451,6 +1447,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct scatterlist *sg;
 	dma_addr_t slave_addr;
 	int ret, tmp;
+	u8 src_buses, dst_buses;
+	u32 cctl;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
@@ -1474,11 +1472,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->direction = direction;
 
 	if (direction == DMA_MEM_TO_DEV) {
-		txd->cctl = plchan->dst_cctl;
+		cctl = plchan->dst_cctl | PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
+		src_buses = pl08x->mem_buses;
+		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
-		txd->cctl = plchan->src_cctl;
+		cctl = plchan->src_cctl | PL080_CONTROL_DST_INCR;
 		slave_addr = plchan->cfg.src_addr;
+		src_buses = plchan->cd->periph_buses;
+		dst_buses = pl08x->mem_buses;
 	} else {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1486,6 +1488,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+
 	if (plchan->cfg.device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
@@ -1785,10 +1789,8 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 	chan->name = chan->cd->bus_id;
 	chan->cfg.src_addr = chan->cd->addr;
 	chan->cfg.dst_addr = chan->cd->addr;
-	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
-		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
-	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
-		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+	chan->src_cctl = cctl;
+	chan->dst_cctl = cctl;
 }
 
 /*
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 10/31] dmaengine: PL08x: move the bus and increment selection to dma prepare function
@ 2012-06-07 10:49       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel

Move the bus and transfer increment selection to the DMA prepare
function rather than the slave configuration function.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   26 ++++++++++++++------------
 1 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 7eb0e8e..bd51a44 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1258,13 +1258,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
-			pl08x_select_bus(plchan->cd->periph_buses,
-					 pl08x->mem_buses);
+		plchan->src_cctl = pl08x_cctl(cctl);
 	} else {
-		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
-			pl08x_select_bus(pl08x->mem_buses,
-					 plchan->cd->periph_buses);
+		plchan->dst_cctl = pl08x_cctl(cctl);
 	}
 
 	dev_dbg(&pl08x->adev->dev,
@@ -1451,6 +1447,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct scatterlist *sg;
 	dma_addr_t slave_addr;
 	int ret, tmp;
+	u8 src_buses, dst_buses;
+	u32 cctl;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
@@ -1474,11 +1472,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->direction = direction;
 
 	if (direction == DMA_MEM_TO_DEV) {
-		txd->cctl = plchan->dst_cctl;
+		cctl = plchan->dst_cctl | PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
+		src_buses = pl08x->mem_buses;
+		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
-		txd->cctl = plchan->src_cctl;
+		cctl = plchan->src_cctl | PL080_CONTROL_DST_INCR;
 		slave_addr = plchan->cfg.src_addr;
+		src_buses = plchan->cd->periph_buses;
+		dst_buses = pl08x->mem_buses;
 	} else {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1486,6 +1488,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+
 	if (plchan->cfg.device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
@@ -1785,10 +1789,8 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 	chan->name = chan->cd->bus_id;
 	chan->cfg.src_addr = chan->cd->addr;
 	chan->cfg.dst_addr = chan->cd->addr;
-	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
-		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
-	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
-		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+	chan->src_cctl = cctl;
+	chan->dst_cctl = cctl;
 }
 
 /*
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 11/31] dmaengine: PL08x: extract function to generate cctl values
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:49       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Extract the functionality from dma_slave_config to generate the cctl
values for a given bus width and burst size.  This allows us to use
this elsewhere in the driver, namely the prepare functions.
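
The intended use from the prepare path looks roughly like this (sketch
only; the real call sites appear later in this series):

	cctl = pl08x_get_cctl(plchan, plchan->cfg.dst_addr_width,
			      plchan->cfg.dst_maxburst);
	if (cctl == ~0) {
		/* slave config never supplied a usable width/burst */
		return NULL;
	}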

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   53 +++++++++++++++++++++++++++------------------
 1 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bd51a44..fde801f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1207,14 +1207,40 @@ static u32 pl08x_burst(u32 maxburst)
 	return burst_sizes[i].reg;
 }
 
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+	enum dma_slave_buswidth addr_width, u32 maxburst)
+{
+	u32 width, burst, cctl = 0;
+
+	width = pl08x_width(addr_width);
+	if (width == ~0)
+		return ~0;
+
+	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
+	/*
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element.  Also select one element if no maxburst
+	 * is specified.
+	 */
+	if (plchan->cd->single)
+		maxburst = 1;
+
+	burst = pl08x_burst(maxburst);
+	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+	return pl08x_cctl(cctl);
+}
+
 static int dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	enum dma_slave_buswidth addr_width;
-	u32 width, burst, maxburst;
-	u32 cctl = 0;
+	u32 maxburst, cctl = 0;
 
 	if (!plchan->slave)
 		return -EINVAL;
@@ -1233,8 +1259,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
-	width = pl08x_width(addr_width);
-	if (width == ~0) {
+	cctl = pl08x_get_cctl(plchan, addr_width, maxburst);
+	if (cctl == ~0) {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
 		return -EINVAL;
@@ -1242,25 +1268,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
 	plchan->cfg = *config;
 
-	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
-	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
-
-	/*
-	 * If this channel will only request single transfers, set this
-	 * down to ONE element.  Also select one element if no maxburst
-	 * is specified.
-	 */
-	if (plchan->cd->single)
-		maxburst = 1;
-
-	burst = pl08x_burst(maxburst);
-	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
-	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
-
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = pl08x_cctl(cctl);
+		plchan->src_cctl = cctl;
 	} else {
-		plchan->dst_cctl = pl08x_cctl(cctl);
+		plchan->dst_cctl = cctl;
 	}
 
 	dev_dbg(&pl08x->adev->dev,
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 11/31] dmaengine: PL08x: extract function to generate cctl values
@ 2012-06-07 10:49       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel

Extract the functionality from dma_slave_config to generate the cctl
values for a given bus width and burst size.  This allows us to use
this elsewhere in the driver, namely the prepare functions.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   53 +++++++++++++++++++++++++++------------------
 1 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bd51a44..fde801f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1207,14 +1207,40 @@ static u32 pl08x_burst(u32 maxburst)
 	return burst_sizes[i].reg;
 }
 
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+	enum dma_slave_buswidth addr_width, u32 maxburst)
+{
+	u32 width, burst, cctl = 0;
+
+	width = pl08x_width(addr_width);
+	if (width == ~0)
+		return ~0;
+
+	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
+	/*
+	 * If this channel will only request single transfers, set this
+	 * down to ONE element.  Also select one element if no maxburst
+	 * is specified.
+	 */
+	if (plchan->cd->single)
+		maxburst = 1;
+
+	burst = pl08x_burst(maxburst);
+	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+	return pl08x_cctl(cctl);
+}
+
 static int dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	enum dma_slave_buswidth addr_width;
-	u32 width, burst, maxburst;
-	u32 cctl = 0;
+	u32 maxburst, cctl = 0;
 
 	if (!plchan->slave)
 		return -EINVAL;
@@ -1233,8 +1259,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
-	width = pl08x_width(addr_width);
-	if (width == ~0) {
+	cctl = pl08x_get_cctl(plchan, addr_width, maxburst);
+	if (cctl == ~0) {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien address width\n");
 		return -EINVAL;
@@ -1242,25 +1268,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
 	plchan->cfg = *config;
 
-	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
-	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
-
-	/*
-	 * If this channel will only request single transfers, set this
-	 * down to ONE element.  Also select one element if no maxburst
-	 * is specified.
-	 */
-	if (plchan->cd->single)
-		maxburst = 1;
-
-	burst = pl08x_burst(maxburst);
-	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
-	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
-
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = pl08x_cctl(cctl);
+		plchan->src_cctl = cctl;
 	} else {
-		plchan->dst_cctl = pl08x_cctl(cctl);
+		plchan->dst_cctl = cctl;
 	}
 
 	dev_dbg(&pl08x->adev->dev,
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 12/31] dmaengine: PL08x: ignore 'direction' argument in dma_slave_config
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:49       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Ignore the direction argument in dma_slave_config, and configure both
directions independently.  We still check that the configuration for
the intended direction is valid; this check is only a debugging aid
for now and will eventually be dropped.
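
Concretely, a client may now describe both directions in a single call,
and only the temporary validity check still looks at the direction
field.  A sketch (addresses and widths illustrative only):

	struct dma_slave_config cfg = {
		.src_addr	= fifo_phys,	/* used for DMA_DEV_TO_MEM */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
		.dst_addr	= fifo_phys,	/* used for DMA_MEM_TO_DEV */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	dmaengine_slave_config(chan, &cfg);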

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   56 ++++++++++++++-------------------------------
 1 files changed, 18 insertions(+), 38 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fde801f..50b9a83 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -218,8 +218,6 @@ enum pl08x_dma_chan_state {
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
  * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -239,7 +237,6 @@ struct pl08x_dma_chan {
 	struct dma_slave_config cfg;
 	u32 src_cctl;
 	u32 dst_cctl;
-	enum dma_transfer_direction runtime_direction;
 	struct list_head pend_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
@@ -1239,50 +1236,31 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
-	enum dma_slave_buswidth addr_width;
-	u32 maxburst, cctl = 0;
+	u32 src_cctl, dst_cctl;
 
 	if (!plchan->slave)
 		return -EINVAL;
 
-	/* Transfer direction */
-	plchan->runtime_direction = config->direction;
-	if (config->direction == DMA_MEM_TO_DEV) {
-		addr_width = config->dst_addr_width;
-		maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_DEV_TO_MEM) {
-		addr_width = config->src_addr_width;
-		maxburst = config->src_maxburst;
-	} else {
+	dst_cctl = pl08x_get_cctl(plchan, config->dst_addr_width,
+				  config->dst_maxburst);
+	if (dst_cctl == ~0 && config->direction == DMA_MEM_TO_DEV) {
 		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien transfer direction\n");
+			"bad runtime_config: alien address width (M2D)\n");
 		return -EINVAL;
 	}
 
-	cctl = pl08x_get_cctl(plchan, addr_width, maxburst);
-	if (cctl == ~0) {
+	src_cctl = pl08x_get_cctl(plchan, config->src_addr_width,
+				  config->src_maxburst);
+	if (src_cctl == ~0 && config->direction == DMA_DEV_TO_MEM) {
 		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width\n");
+			"bad runtime_config: alien address width (D2M)\n");
 		return -EINVAL;
 	}
 
+	plchan->dst_cctl = dst_cctl;
+	plchan->src_cctl = src_cctl;
 	plchan->cfg = *config;
 
-	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = cctl;
-	} else {
-		plchan->dst_cctl = cctl;
-	}
-
-	dev_dbg(&pl08x->adev->dev,
-		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=0x%08x\n",
-		dma_chan_name(chan), plchan->name,
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
-		addr_width,
-		maxburst,
-		cctl);
-
 	return 0;
 }
 
@@ -1470,11 +1448,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (direction != plchan->runtime_direction)
-		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
-			"the direction configured for the PrimeCell\n",
-			__func__);
-
 	/*
 	 * Set up addresses, the PrimeCell configured address
 	 * will take precedence since this may configure the
@@ -1499,6 +1472,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	if (cctl == ~0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev,
+			"DMA slave configuration botched?\n");
+		return NULL;
+	}
+
 	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
 
 	if (plchan->cfg.device_fc)
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 12/31] dmaengine: PL08x: ignore 'direction' argument in dma_slave_config
@ 2012-06-07 10:49       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:49 UTC (permalink / raw)
  To: linux-arm-kernel

Ignore the direction argument in dma_slave_config, and configure both
directions independently.  We still check that the configuration for
the intended direction is valid; this check is only a debugging aid
for now and will eventually be dropped.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   56 ++++++++++++++-------------------------------
 1 files changed, 18 insertions(+), 38 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fde801f..50b9a83 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -218,8 +218,6 @@ enum pl08x_dma_chan_state {
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
  * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -239,7 +237,6 @@ struct pl08x_dma_chan {
 	struct dma_slave_config cfg;
 	u32 src_cctl;
 	u32 dst_cctl;
-	enum dma_transfer_direction runtime_direction;
 	struct list_head pend_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
@@ -1239,50 +1236,31 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
-	enum dma_slave_buswidth addr_width;
-	u32 maxburst, cctl = 0;
+	u32 src_cctl, dst_cctl;
 
 	if (!plchan->slave)
 		return -EINVAL;
 
-	/* Transfer direction */
-	plchan->runtime_direction = config->direction;
-	if (config->direction == DMA_MEM_TO_DEV) {
-		addr_width = config->dst_addr_width;
-		maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_DEV_TO_MEM) {
-		addr_width = config->src_addr_width;
-		maxburst = config->src_maxburst;
-	} else {
+	dst_cctl = pl08x_get_cctl(plchan, config->dst_addr_width,
+				  config->dst_maxburst);
+	if (dst_cctl == ~0 && config->direction == DMA_MEM_TO_DEV) {
 		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien transfer direction\n");
+			"bad runtime_config: alien address width (M2D)\n");
 		return -EINVAL;
 	}
 
-	cctl = pl08x_get_cctl(plchan, addr_width, maxburst);
-	if (cctl == ~0) {
+	src_cctl = pl08x_get_cctl(plchan, config->src_addr_width,
+				  config->src_maxburst);
+	if (src_cctl == ~0 && config->direction == DMA_DEV_TO_MEM) {
 		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width\n");
+			"bad runtime_config: alien address width (D2M)\n");
 		return -EINVAL;
 	}
 
+	plchan->dst_cctl = dst_cctl;
+	plchan->src_cctl = src_cctl;
 	plchan->cfg = *config;
 
-	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
-		plchan->src_cctl = cctl;
-	} else {
-		plchan->dst_cctl = cctl;
-	}
-
-	dev_dbg(&pl08x->adev->dev,
-		"configured channel %s (%s) for %s, data width %d, "
-		"maxburst %d words, LE, CCTL=0x%08x\n",
-		dma_chan_name(chan), plchan->name,
-		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
-		addr_width,
-		maxburst,
-		cctl);
-
 	return 0;
 }
 
@@ -1470,11 +1448,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (direction != plchan->runtime_direction)
-		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
-			"the direction configured for the PrimeCell\n",
-			__func__);
-
 	/*
 	 * Set up addresses, the PrimeCell configured address
 	 * will take precedence since this may configure the
@@ -1499,6 +1472,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	if (cctl == ~0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_err(&pl08x->adev->dev,
+			"DMA slave configuration botched?\n");
+		return NULL;
+	}
+
 	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
 
 	if (plchan->cfg.device_fc)
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 13/31] dmaengine: PL08x: get rid of unnecessary checks in dma_slave_config
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:50       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:50 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Get rid of the unnecessary checks in dma_slave_config which rely on
the DMA direction.  This allows us to move the computation of cctl
to the prepare function.
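
With the platform data rename, a board describes its memcpy channel
along these lines (hypothetical board code; the cctl value is only an
example):

	#include <linux/amba/pl080.h>
	#include <linux/amba/pl08x.h>

	static struct pl08x_platform_data foo_pl08x_pd = {
		.memcpy_channel = {
			.bus_id		= "memcpy",
			.cctl_memcpy	= (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
					  (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT) |
					  (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
					  (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT),
		},
		/* slave_channels, get_signal, put_signal, ... as before */
	};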

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |   41 +++++++++++++----------------------------
 include/linux/amba/pl08x.h |    5 +++--
 2 files changed, 16 insertions(+), 30 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 50b9a83..f739778 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -235,8 +235,6 @@ struct pl08x_dma_chan {
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	u32 src_cctl;
-	u32 dst_cctl;
 	struct list_head pend_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
@@ -1235,30 +1233,15 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-	u32 src_cctl, dst_cctl;
 
 	if (!plchan->slave)
 		return -EINVAL;
 
-	dst_cctl = pl08x_get_cctl(plchan, config->dst_addr_width,
-				  config->dst_maxburst);
-	if (dst_cctl == ~0 && config->direction == DMA_MEM_TO_DEV) {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width (M2D)\n");
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
-	}
 
-	src_cctl = pl08x_get_cctl(plchan, config->src_addr_width,
-				  config->src_maxburst);
-	if (src_cctl == ~0 && config->direction == DMA_DEV_TO_MEM) {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width (D2M)\n");
-		return -EINVAL;
-	}
-
-	plchan->dst_cctl = dst_cctl;
-	plchan->src_cctl = src_cctl;
 	plchan->cfg = *config;
 
 	return 0;
@@ -1407,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
-	txd->cctl = pl08x->pd->memcpy_channel.cctl &
+	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
 			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
 
 	/* Both to be incremented or the code will break */
@@ -1434,10 +1417,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_txd *txd;
 	struct pl08x_sg *dsg;
 	struct scatterlist *sg;
+	enum dma_slave_buswidth addr_width;
 	dma_addr_t slave_addr;
 	int ret, tmp;
 	u8 src_buses, dst_buses;
-	u32 cctl;
+	u32 maxburst, cctl;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
@@ -1456,13 +1440,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->direction = direction;
 
 	if (direction == DMA_MEM_TO_DEV) {
-		cctl = plchan->dst_cctl | PL080_CONTROL_SRC_INCR;
+		cctl = PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
+		addr_width = plchan->cfg.dst_addr_width;
+		maxburst = plchan->cfg.dst_maxburst;
 		src_buses = pl08x->mem_buses;
 		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
-		cctl = plchan->src_cctl | PL080_CONTROL_DST_INCR;
+		cctl = PL080_CONTROL_DST_INCR;
 		slave_addr = plchan->cfg.src_addr;
+		addr_width = plchan->cfg.src_addr_width;
+		maxburst = plchan->cfg.src_maxburst;
 		src_buses = plchan->cd->periph_buses;
 		dst_buses = pl08x->mem_buses;
 	} else {
@@ -1472,6 +1460,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
 	if (cctl == ~0) {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1774,14 +1763,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 
 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 {
-	u32 cctl = pl08x_cctl(chan->cd->cctl);
-
 	chan->slave = true;
 	chan->name = chan->cd->bus_id;
 	chan->cfg.src_addr = chan->cd->addr;
 	chan->cfg.dst_addr = chan->cd->addr;
-	chan->src_cctl = cctl;
-	chan->dst_cctl = cctl;
 }
 
 /*
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 158ce26..2a5f64a 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,7 +47,8 @@ enum {
  * devices with static assignments
  * @muxval: a number usually used to poke into some mux regiser to
  * mux in the signal to this channel
- * @cctl_opt: default options for the channel control register
+ * @cctl_memcpy: options for the channel control register for memcpy
+ *  *** not used for slave channels ***
  * @addr: source/target address in physical memory for this DMA channel,
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
@@ -62,7 +63,7 @@ struct pl08x_channel_data {
 	int min_signal;
 	int max_signal;
 	u32 muxval;
-	u32 cctl;
+	u32 cctl_memcpy;
 	dma_addr_t addr;
 	bool single;
 	u8 periph_buses;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 13/31] dmaengine: PL08x: get rid of unnecessary checks in dma_slave_config
@ 2012-06-07 10:50       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:50 UTC (permalink / raw)
  To: linux-arm-kernel

Get rid of the unnecessary checks in dma_slave_config which rely on
the DMA direction.  This allows us to move the computation of cctl
to the prepare function.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c   |   41 +++++++++++++----------------------------
 include/linux/amba/pl08x.h |    5 +++--
 2 files changed, 16 insertions(+), 30 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 50b9a83..f739778 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -235,8 +235,6 @@ struct pl08x_dma_chan {
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	u32 src_cctl;
-	u32 dst_cctl;
 	struct list_head pend_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
@@ -1235,30 +1233,15 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-	u32 src_cctl, dst_cctl;
 
 	if (!plchan->slave)
 		return -EINVAL;
 
-	dst_cctl = pl08x_get_cctl(plchan, config->dst_addr_width,
-				  config->dst_maxburst);
-	if (dst_cctl == ~0 && config->direction == DMA_MEM_TO_DEV) {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width (M2D)\n");
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 		return -EINVAL;
-	}
 
-	src_cctl = pl08x_get_cctl(plchan, config->src_addr_width,
-				  config->src_maxburst);
-	if (src_cctl == ~0 && config->direction == DMA_DEV_TO_MEM) {
-		dev_err(&pl08x->adev->dev,
-			"bad runtime_config: alien address width (D2M)\n");
-		return -EINVAL;
-	}
-
-	plchan->dst_cctl = dst_cctl;
-	plchan->src_cctl = src_cctl;
 	plchan->cfg = *config;
 
 	return 0;
@@ -1407,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 	/* Set platform data for m2m */
 	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
-	txd->cctl = pl08x->pd->memcpy_channel.cctl &
+	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
 			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
 
 	/* Both to be incremented or the code will break */
@@ -1434,10 +1417,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	struct pl08x_txd *txd;
 	struct pl08x_sg *dsg;
 	struct scatterlist *sg;
+	enum dma_slave_buswidth addr_width;
 	dma_addr_t slave_addr;
 	int ret, tmp;
 	u8 src_buses, dst_buses;
-	u32 cctl;
+	u32 maxburst, cctl;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
@@ -1456,13 +1440,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	txd->direction = direction;
 
 	if (direction == DMA_MEM_TO_DEV) {
-		cctl = plchan->dst_cctl | PL080_CONTROL_SRC_INCR;
+		cctl = PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
+		addr_width = plchan->cfg.dst_addr_width;
+		maxburst = plchan->cfg.dst_maxburst;
 		src_buses = pl08x->mem_buses;
 		dst_buses = plchan->cd->periph_buses;
 	} else if (direction == DMA_DEV_TO_MEM) {
-		cctl = plchan->src_cctl | PL080_CONTROL_DST_INCR;
+		cctl = PL080_CONTROL_DST_INCR;
 		slave_addr = plchan->cfg.src_addr;
+		addr_width = plchan->cfg.src_addr_width;
+		maxburst = plchan->cfg.src_maxburst;
 		src_buses = plchan->cd->periph_buses;
 		dst_buses = pl08x->mem_buses;
 	} else {
@@ -1472,6 +1460,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
+	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
 	if (cctl == ~0) {
 		pl08x_free_txd(pl08x, txd);
 		dev_err(&pl08x->adev->dev,
@@ -1774,14 +1763,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 
 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
 {
-	u32 cctl = pl08x_cctl(chan->cd->cctl);
-
 	chan->slave = true;
 	chan->name = chan->cd->bus_id;
 	chan->cfg.src_addr = chan->cd->addr;
 	chan->cfg.dst_addr = chan->cd->addr;
-	chan->src_cctl = cctl;
-	chan->dst_cctl = cctl;
 }
 
 /*
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 158ce26..2a5f64a 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,7 +47,8 @@ enum {
  * devices with static assignments
  * @muxval: a number usually used to poke into some mux regiser to
  * mux in the signal to this channel
- * @cctl_opt: default options for the channel control register
+ * @cctl_memcpy: options for the channel control register for memcpy
+ *  *** not used for slave channels ***
  * @addr: source/target address in physical memory for this DMA channel,
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
@@ -62,7 +63,7 @@ struct pl08x_channel_data {
 	int min_signal;
 	int max_signal;
 	u32 muxval;
-	u32 cctl;
+	u32 cctl_memcpy;
 	dma_addr_t addr;
 	bool single;
 	u8 periph_buses;
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 14/31] dmaengine: PL08x: split DMA signal muxing from channel alloc
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:50       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:50 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Split the DMA request mux signal handling from the physical channel
allocation code.  The physical channel has very little to do with the
DMA request input which will be used, so these should be two separate
operations.
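
On platforms where the request lines go through an external mux, the
hooks behind this sit in the platform data and look roughly like the
following (hypothetical SoC code; the foo_mux_* helpers are assumed):

	static int foo_get_signal(const struct pl08x_channel_data *cd)
	{
		/* return the request input routed to this peripheral,
		 * or a negative error code if none is free */
		return foo_mux_claim(cd->muxval, cd->min_signal, cd->max_signal);
	}

	static void foo_put_signal(const struct pl08x_channel_data *cd, int signal)
	{
		foo_mux_release(signal);
	}

	static struct pl08x_platform_data foo_pl08x_pd = {
		/* ... */
		.get_signal	= foo_get_signal,
		.put_signal	= foo_put_signal,
	};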

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   43 ++++++++++++++++++++++++++++++++++++-------
 1 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index f739778..b579bac 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -296,6 +296,39 @@ static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 }
 
 /*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX.  One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan, struct pl08x_phy_chan *ch)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+	int ret;
+
+	if (pd->get_signal) {
+		ret = pd->get_signal(plchan->cd);
+		if (ret < 0)
+			return ret;
+
+		ch->signal = ret;
+	}
+	return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+
+	if (plchan->phychan->signal >= 0 && pd->put_signal) {
+		pd->put_signal(plchan->cd, plchan->phychan->signal);
+		plchan->phychan->signal = -1;
+	}
+}
+
+/*
  * Physical channel handling
  */
 
@@ -999,8 +1032,8 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * need, but for slaves the physical signals may be muxed!
 	 * Can the platform allow us to use this channel?
 	 */
-	if (plchan->slave && pl08x->pd->get_signal) {
-		ret = pl08x->pd->get_signal(plchan->cd);
+	if (plchan->slave) {
+		ret = pl08x_request_mux(plchan, ch);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
 				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
@@ -1009,7 +1042,6 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			pl08x_put_phy_channel(pl08x, ch);
 			return -EBUSY;
 		}
-		ch->signal = ret;
 	}
 
 	plchan->phychan = ch;
@@ -1034,10 +1066,7 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 
-	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
-		pl08x->pd->put_signal(plchan->cd, plchan->phychan->signal);
-		plchan->phychan->signal = -1;
-	}
+	pl08x_release_mux(plchan);
 	pl08x_put_phy_channel(pl08x, plchan->phychan);
 	plchan->phychan = NULL;
 }
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 15/31] dmaengine: PL08x: move DMA signal muxing into pl08x_dma_chan struct
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:50       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:50 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Move the signal handling out of the physical channel structure into
the virtual channel structure, where it should belong as it has more
to do with the virtual channel than the physical one.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   29 +++++++++++++++--------------
 1 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b579bac..c203d2f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -136,17 +136,17 @@ struct pl08x_bus_data {
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
  * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this physical channel
- * right now
  * @serving: the virtual channel currently being served by this physical
  * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
  */
 struct pl08x_phy_chan {
 	unsigned int id;
 	void __iomem *base;
 	spinlock_t lock;
-	int signal;
 	struct pl08x_dma_chan *serving;
+	bool locked;
 };
 
 /**
@@ -226,6 +226,7 @@ enum pl08x_dma_chan_state {
  * @slave: whether this channel is a device (slave) or for memcpy
  * @waiting: a TX descriptor on this channel which is waiting for a physical
  * channel to become available
+ * @signal: the physical DMA request signal which this channel is using
  */
 struct pl08x_dma_chan {
 	struct dma_chan chan;
@@ -242,6 +243,7 @@ struct pl08x_dma_chan {
 	enum pl08x_dma_chan_state state;
 	bool slave;
 	struct pl08x_txd *waiting;
+	int signal;
 };
 
 /**
@@ -303,7 +305,7 @@ static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
  * via a board/SoC specific external MUX.  One important point to note
  * here is that this does not depend on the physical channel.
  */
-static int pl08x_request_mux(struct pl08x_dma_chan *plchan, struct pl08x_phy_chan *ch)
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
 {
 	const struct pl08x_platform_data *pd = plchan->host->pd;
 	int ret;
@@ -313,7 +315,7 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan, struct pl08x_phy_cha
 		if (ret < 0)
 			return ret;
 
-		ch->signal = ret;
+		plchan->signal = ret;
 	}
 	return 0;
 }
@@ -322,9 +324,9 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
 {
 	const struct pl08x_platform_data *pd = plchan->host->pd;
 
-	if (plchan->phychan->signal >= 0 && pd->put_signal) {
-		pd->put_signal(plchan->cd, plchan->phychan->signal);
-		plchan->phychan->signal = -1;
+	if (plchan->signal >= 0 && pd->put_signal) {
+		pd->put_signal(plchan->cd, plchan->signal);
+		plchan->signal = -1;
 	}
 }
 
@@ -549,7 +551,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 
 		if (!ch->locked && !ch->serving) {
 			ch->serving = virt_chan;
-			ch->signal = -1;
 			spin_unlock_irqrestore(&ch->lock, flags);
 			break;
 		}
@@ -1033,7 +1034,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	 * Can the platform allow us to use this channel?
 	 */
 	if (plchan->slave) {
-		ret = pl08x_request_mux(plchan, ch);
+		ret = pl08x_request_mux(plchan);
 		if (ret < 0) {
 			dev_dbg(&pl08x->adev->dev,
 				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
@@ -1047,15 +1048,15 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	plchan->phychan = ch;
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
 		 ch->id,
-		 ch->signal,
+		 plchan->signal,
 		 plchan->name);
 
 got_channel:
 	/* Assign the flow control signal to this channel */
 	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
 	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 
 	plchan->phychan_hold++;
 
@@ -1825,6 +1826,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 
 		chan->host = pl08x;
 		chan->state = PL08X_CHAN_IDLE;
+		chan->signal = -1;
 
 		if (slave) {
 			chan->cd = &pl08x->pd->slave_channels[i];
@@ -2062,7 +2064,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 		ch->id = i;
 		ch->base = pl08x->base + PL080_Cx_BASE(i);
 		spin_lock_init(&ch->lock);
-		ch->signal = -1;
 
 		/*
 		 * Nomadik variants can have channels that are locked
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 16/31] dmaengine: PL08x: track mux usage on a per-channel basis.
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:51       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:51 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Keep track of the number of descriptors currently using a MUX setting
on a per-channel basis.  This allows us to know when we have descriptors
queued somewhere which have been assigned a DMA request signal.
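
For reference, a standalone illustration of the counting pattern (this is
not the driver code; the example_* names are invented and the platform
get/put hooks are stubbed out).  The point is that the get_signal and
put_signal callbacks only run on the first acquire and the last release:

#include <stdio.h>

struct example_chan {
	int signal;		/* -1 while no request signal is held */
	unsigned mux_use;	/* descriptors currently using the signal */
};

static int  example_get_signal(void)    { return 7; }	/* pretend mux slot */
static void example_put_signal(int sig) { printf("released signal %d\n", sig); }

static int example_request_mux(struct example_chan *c)
{
	if (c->mux_use++ == 0) {
		int ret = example_get_signal();
		if (ret < 0) {
			c->mux_use = 0;
			return ret;
		}
		c->signal = ret;
	}
	return 0;
}

static void example_release_mux(struct example_chan *c)
{
	if (c->signal >= 0 && --c->mux_use == 0) {
		example_put_signal(c->signal);
		c->signal = -1;
	}
}

int main(void)
{
	struct example_chan c = { .signal = -1, .mux_use = 0 };

	example_request_mux(&c);	/* first user: claims the signal */
	example_request_mux(&c);	/* second user: only bumps the count */
	example_release_mux(&c);	/* 2 -> 1, signal still held */
	example_release_mux(&c);	/* 1 -> 0, signal released */
	printf("signal now %d, mux_use %u\n", c.signal, c.mux_use);
	return 0;
}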

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   18 +++++++++++++-----
 1 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c203d2f..ac9fdcc 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -227,6 +227,7 @@ enum pl08x_dma_chan_state {
  * @waiting: a TX descriptor on this channel which is waiting for a physical
  * channel to become available
  * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
  */
 struct pl08x_dma_chan {
 	struct dma_chan chan;
@@ -244,6 +245,7 @@ struct pl08x_dma_chan {
 	bool slave;
 	struct pl08x_txd *waiting;
 	int signal;
+	unsigned mux_use;
 };
 
 /**
@@ -310,10 +312,12 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
 	const struct pl08x_platform_data *pd = plchan->host->pd;
 	int ret;
 
-	if (pd->get_signal) {
+	if (plchan->mux_use++ == 0 && pd->get_signal) {
 		ret = pd->get_signal(plchan->cd);
-		if (ret < 0)
+		if (ret < 0) {
+			plchan->mux_use = 0;
 			return ret;
+		}
 
 		plchan->signal = ret;
 	}
@@ -324,9 +328,13 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
 {
 	const struct pl08x_platform_data *pd = plchan->host->pd;
 
-	if (plchan->signal >= 0 && pd->put_signal) {
-		pd->put_signal(plchan->cd, plchan->signal);
-		plchan->signal = -1;
+	if (plchan->signal >= 0) {
+		WARN_ON(plchan->mux_use == 0);
+
+		if (--plchan->mux_use == 0 && pd->put_signal) {
+			pd->put_signal(plchan->cd, plchan->signal);
+			plchan->signal = -1;
+		}
 	}
 }
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 17/31] dmaengine: PL08x: convert to a list of completed descriptors
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:51       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:51 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Convert PL08x to use a list of completed descriptors rather than
merely relying upon a single pointer.  This makes it possible to
schedule the tasklet for other purposes, and makes our behaviour
similar to virt-dma.
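
The pattern being adopted, shown below as a standalone sketch (not the
driver code; a userspace mutex stands in for the channel spinlock and
list ordering is ignored for brevity): completed descriptors are
collected on a list under the lock, the tasklet splices that list to a
private head, and the callbacks then run with the lock dropped.

#include <stdio.h>
#include <pthread.h>

struct item {
	struct item *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *done_list;		/* filled by the "IRQ" side */

static void complete(struct item *it)	/* stand-in for the IRQ handler */
{
	pthread_mutex_lock(&lock);
	it->next = done_list;
	done_list = it;
	pthread_mutex_unlock(&lock);
}

static void tasklet(void)
{
	struct item *head;

	pthread_mutex_lock(&lock);
	head = done_list;		/* splice the whole list ... */
	done_list = NULL;
	pthread_mutex_unlock(&lock);	/* ... and drop the lock */

	while (head) {			/* callbacks run unlocked */
		struct item *it = head;
		head = it->next;
		printf("callback for txd %d\n", it->id);
	}
}

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2 };

	complete(&a);
	complete(&b);
	tasklet();
	return 0;
}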

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   30 ++++++++++++++++++++----------
 1 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index ac9fdcc..54e3eb0 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -219,6 +219,7 @@ enum pl08x_dma_chan_state {
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
  * @pend_list: queued transactions pending on this channel
+ * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
@@ -238,6 +239,7 @@ struct pl08x_dma_chan {
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
 	struct list_head pend_list;
+	struct list_head done_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
 	struct pl08x_driver_data *host;
@@ -1673,18 +1675,11 @@ static void pl08x_tasklet(unsigned long data)
 {
 	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
 	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_txd *txd;
 	unsigned long flags;
+	LIST_HEAD(head);
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
-	txd = plchan->at;
-	plchan->at = NULL;
-
-	if (txd) {
-		/* Update last completed */
-		dma_cookie_complete(&txd->tx);
-	}
+	list_splice_tail_init(&plchan->done_list, &head);
 
 	/* If a new descriptor is queued, set it up plchan->at is NULL here */
 	if (!list_empty(&plchan->pend_list)) {
@@ -1739,10 +1734,14 @@ static void pl08x_tasklet(unsigned long data)
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
-	if (txd) {
+	while (!list_empty(&head)) {
+		struct pl08x_txd *txd = list_first_entry(&head,
+						struct pl08x_txd, node);
 		dma_async_tx_callback callback = txd->tx.callback;
 		void *callback_param = txd->tx.callback_param;
 
+		list_del(&txd->node);
+
 		/* Don't try to unmap buffers on slave channels */
 		if (!plchan->slave)
 			pl08x_unmap_buffers(txd);
@@ -1782,6 +1781,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;
+			struct pl08x_txd *tx;
 
 			if (!plchan) {
 				dev_err(&pl08x->adev->dev,
@@ -1790,6 +1790,15 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				continue;
 			}
 
+			spin_lock(&plchan->lock);
+			tx = plchan->at;
+			if (tx) {
+				plchan->at = NULL;
+				dma_cookie_complete(&tx->tx);
+				list_add_tail(&tx->node, &plchan->done_list);
+			}
+			spin_unlock(&plchan->lock);
+
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
 			mask |= (1 << i);
@@ -1856,6 +1865,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
+		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 18/31] dmaengine: PL08x: move DMA signal muxing into slave prepare code
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:51       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:51 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Move the DMA request muxing into the slave prepare code and txd
release/completion code.  This means we only hold the DMA request
mux while there are descriptors waiting to be started or in
progress.

This leaves txd->direction as a write-only variable; remove it.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   79 ++++++++++++++++++---------------------------
 1 files changed, 32 insertions(+), 47 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 54e3eb0..e04ca0b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -168,7 +168,6 @@ struct pl08x_sg {
  * @tx: async tx descriptor
  * @node: node for txd list for channels
  * @dsg_list: list of children sg's
- * @direction: direction of transfer
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
@@ -178,7 +177,6 @@ struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	struct list_head dsg_list;
-	enum dma_transfer_direction direction;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
@@ -997,6 +995,7 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
 					 next, &plchan->pend_list, node) {
+			pl08x_release_mux(plchan);
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
@@ -1018,12 +1017,10 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 /*
  * This should be called with the channel plchan->lock held
  */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
-			    struct pl08x_txd *txd)
+static int prep_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *ch;
-	int ret;
 
 	/* Check if we already have a channel */
 	if (plchan->phychan) {
@@ -1038,36 +1035,11 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 		return -EBUSY;
 	}
 
-	/*
-	 * OK we have a physical channel: for memcpy() this is all we
-	 * need, but for slaves the physical signals may be muxed!
-	 * Can the platform allow us to use this channel?
-	 */
-	if (plchan->slave) {
-		ret = pl08x_request_mux(plchan);
-		if (ret < 0) {
-			dev_dbg(&pl08x->adev->dev,
-				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
-				ch->id, plchan->name);
-			/* Release physical channel & return */
-			pl08x_put_phy_channel(pl08x, ch);
-			return -EBUSY;
-		}
-	}
-
 	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
-		 ch->id,
-		 plchan->signal,
-		 plchan->name);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		 ch->id, plchan->name);
 
 got_channel:
-	/* Assign the flow control signal to this channel */
-	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
-	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
 	plchan->phychan_hold++;
 
 	return 0;
@@ -1077,7 +1049,6 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 
-	pl08x_release_mux(plchan);
 	pl08x_put_phy_channel(pl08x, plchan->phychan);
 	plchan->phychan = NULL;
 }
@@ -1340,19 +1311,12 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	 * See if we already have a physical channel allocated,
 	 * else this is the time to try to get one.
 	 */
-	ret = prep_phy_channel(plchan, txd);
+	ret = prep_phy_channel(plchan);
 	if (ret) {
 		/*
 		 * No physical channel was available.
 		 *
 		 * memcpy transfers can be sorted out at submission time.
-		 *
-		 * Slave transfers may have been denied due to platform
-		 * channel muxing restrictions.  Since there is no guarantee
-		 * that this will ever be resolved, and the signal must be
-		 * acquired AFTER acquiring the physical channel, we will let
-		 * them be NACK:ed with -EBUSY here. The drivers can retry
-		 * the prep() call if they are eager on doing this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
@@ -1423,7 +1387,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
 
-	txd->direction = DMA_MEM_TO_MEM;
 	dsg->src_addr = src;
 	dsg->dst_addr = dest;
 	dsg->len = len;
@@ -1477,8 +1440,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * will take precedence since this may configure the
 	 * channel target address dynamically at runtime.
 	 */
-	txd->direction = direction;
-
 	if (direction == DMA_MEM_TO_DEV) {
 		cctl = PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
@@ -1519,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 
+	ret = pl08x_request_mux(plchan);
+	if (ret < 0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_dbg(&pl08x->adev->dev,
+			"unable to mux for transfer on %s due to platform restrictions\n",
+			plchan->name);
+		return NULL;
+	}
+
+	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+		 plchan->signal, plchan->name);
+
+	/* Assign the flow control signal to this channel */
+	if (direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else
+		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
 		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
 		if (!dsg) {
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 					__func__);
@@ -1586,6 +1566,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
+			/* Killing this one off, release its mux */
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, plchan->at);
 			plchan->at = NULL;
 		}
@@ -1702,7 +1684,6 @@ static void pl08x_tasklet(unsigned long data)
 
 		/*
 		 * No more jobs, so free up the physical channel
-		 * Free any allocated signal on slave transfers too
 		 */
 		release_phy_channel(plchan);
 		plchan->state = PL08X_CHAN_IDLE;
@@ -1720,8 +1701,7 @@ static void pl08x_tasklet(unsigned long data)
 				int ret;
 
 				/* This should REALLY not fail now */
-				ret = prep_phy_channel(waiting,
-						       waiting->waiting);
+				ret = prep_phy_channel(waiting);
 				BUG_ON(ret);
 				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
@@ -1794,6 +1774,11 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			tx = plchan->at;
 			if (tx) {
 				plchan->at = NULL;
+				/*
+				 * This descriptor is done, release its mux
+				 * reservation.
+				 */
+				pl08x_release_mux(plchan);
 				dma_cookie_complete(&tx->tx);
 				list_add_tail(&tx->node, &plchan->done_list);
 			}
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 19/31] dmaengine: PL08x: remove waiting descriptor pointer
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:52       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:52 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

As we no longer need to pass a descriptor to prep_phy_channel(), we
don't need to keep track of the descriptor which is waiting for a
channel to become available.  So let's get rid of it.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    8 +-------
 1 files changed, 1 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e04ca0b..88661fa 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -223,8 +223,6 @@ enum pl08x_dma_chan_state {
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
- * @waiting: a TX descriptor on this channel which is waiting for a physical
- * channel to become available
  * @signal: the physical DMA request signal which this channel is using
  * @mux_use: count of descriptors using this DMA request signal setting
  */
@@ -243,7 +241,6 @@ struct pl08x_dma_chan {
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
-	struct pl08x_txd *waiting;
 	int signal;
 	unsigned mux_use;
 };
@@ -1074,7 +1071,6 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (!plchan->slave && !plchan->phychan) {
 		/* Do this memcpy whenever there is a channel ready */
 		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
 	} else {
 		plchan->phychan_hold--;
 	}
@@ -1696,8 +1692,7 @@ static void pl08x_tasklet(unsigned long data)
 		 */
 		list_for_each_entry(waiting, &pl08x->memcpy.channels,
 				    chan.device_node) {
-			if (waiting->state == PL08X_CHAN_WAITING &&
-				waiting->waiting != NULL) {
+			if (waiting->state == PL08X_CHAN_WAITING) {
 				int ret;
 
 				/* This should REALLY not fail now */
@@ -1705,7 +1700,6 @@ static void pl08x_tasklet(unsigned long data)
 				BUG_ON(ret);
 				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
-				waiting->waiting = NULL;
 				pl08x_issue_pending(&waiting->chan);
 				break;
 			}
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 20/31] dmaengine: PL08x: re-jig the starting of txds
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:52       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:52 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Rather than code the de-queue of the txd several times, move that into
the start_txd function.  Rename this to better illustrate what it's
now doing, and call this function when starting a delayed memcpy().

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   37 +++++++++++++++++--------------------
 1 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 88661fa..c278d23 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -354,20 +354,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
  * been set when the LLIs were constructed.  Poke them into the hardware
  * and start the transfer.
  */
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_lli *lli = &txd->llis_va[0];
+	struct pl08x_lli *lli;
+	struct pl08x_txd *txd;
 	u32 val;
 
+	txd = list_first_entry(&plchan->pend_list, struct pl08x_txd, node);
+	list_del(&txd->node);
+
 	plchan->at = txd;
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(phychan))
 		cpu_relax();
 
+	lli = &txd->llis_va[0];
+
 	dev_vdbg(&pl08x->adev->dev,
 		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
 		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -1272,15 +1277,8 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 
 	/* Take the first element in the queue and execute it */
 	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *next;
-
-		next = list_first_entry(&plchan->pend_list,
-					struct pl08x_txd,
-					node);
-		list_del(&next->node);
 		plchan->state = PL08X_CHAN_RUNNING;
-
-		pl08x_start_txd(plchan, next);
+		pl08x_start_next_txd(plchan);
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1661,14 +1659,7 @@ static void pl08x_tasklet(unsigned long data)
 
 	/* If a new descriptor is queued, set it up plchan->at is NULL here */
 	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *next;
-
-		next = list_first_entry(&plchan->pend_list,
-					struct pl08x_txd,
-					node);
-		list_del(&next->node);
-
-		pl08x_start_txd(plchan, next);
+		pl08x_start_next_txd(plchan);
 	} else if (plchan->phychan_hold) {
 		/*
 		 * This channel is still in use - we have a new txd being
@@ -1700,7 +1691,13 @@ static void pl08x_tasklet(unsigned long data)
 				BUG_ON(ret);
 				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
-				pl08x_issue_pending(&waiting->chan);
+				/*
+				 * Eww.  We know this isn't going to deadlock
+				 * but lockdep probably doesn't.
+				 */
+				spin_lock(&waiting->lock);
+				pl08x_start_next_txd(waiting);
+				spin_unlock(&waiting->lock);
 				break;
 			}
 		}
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 21/31] dmaengine: PL08x: split the pend_list in two
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:52       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:52 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Our behaviour wasn't correct; issue_pending is supposed to be called
before any submitted descriptors are available for processing by the
DMA engine.  Split the pend_list in two, one for submitted descriptors
and another list for issued descriptors.
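
For context, the contract as seen from a hypothetical client (an
illustrative sketch, not part of this series; example_queue_rx and its
arguments are invented, while the dmaengine calls are the standard slave
API): nothing that has merely been submitted may be started by the
driver until issue_pending() is called.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_queue_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* lands on the driver's pend_list */
	if (dma_submit_error(cookie))
		return -EIO;

	/* only now may the driver move it to issued_list and start it */
	dma_async_issue_pending(chan);
	return 0;
}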

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   41 ++++++++++++++++++++++++++++-------------
 1 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c278d23..b613284 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -217,6 +217,7 @@ enum pl08x_dma_chan_state {
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
  * @pend_list: queued transactions pending on this channel
+ * @issued_list: issued transactions for this channel
  * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -235,6 +236,7 @@ struct pl08x_dma_chan {
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
 	struct list_head pend_list;
+	struct list_head issued_list;
 	struct list_head done_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
@@ -362,7 +364,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 	struct pl08x_txd *txd;
 	u32 val;
 
-	txd = list_first_entry(&plchan->pend_list, struct pl08x_txd, node);
+	txd = list_first_entry(&plchan->issued_list, struct pl08x_txd, node);
 	list_del(&txd->node);
 
 	plchan->at = txd;
@@ -525,6 +527,15 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	}
 
 	/* Sum up all queued transactions */
+	if (!list_empty(&plchan->issued_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->issued_list, node) {
+			struct pl08x_sg *dsg;
+			list_for_each_entry(dsg, &txdi->dsg_list, node)
+				bytes += dsg->len;
+		}
+	}
+
 	if (!list_empty(&plchan->pend_list)) {
 		struct pl08x_txd *txdi;
 		list_for_each_entry(txdi, &plchan->pend_list, node) {
@@ -991,16 +1002,17 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 				struct pl08x_dma_chan *plchan)
 {
-	struct pl08x_txd *txdi = NULL;
-	struct pl08x_txd *next;
+	LIST_HEAD(head);
+	struct pl08x_txd *txd;
 
-	if (!list_empty(&plchan->pend_list)) {
-		list_for_each_entry_safe(txdi,
-					 next, &plchan->pend_list, node) {
-			pl08x_release_mux(plchan);
-			list_del(&txdi->node);
-			pl08x_free_txd(pl08x, txdi);
-		}
+	list_splice_tail_init(&plchan->issued_list, &head);
+	list_splice_tail_init(&plchan->pend_list, &head);
+
+	while (!list_empty(&head)) {
+		txd = list_first_entry(&head, struct pl08x_txd, node);
+		pl08x_release_mux(plchan);
+		list_del(&txd->node);
+		pl08x_free_txd(pl08x, txd);
 	}
 }
 
@@ -1269,6 +1281,8 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->lock, flags);
+	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
+
 	/* Something is already active, or we're waiting for a channel... */
 	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
 		spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1276,7 +1290,7 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	}
 
 	/* Take the first element in the queue and execute it */
-	if (!list_empty(&plchan->pend_list)) {
+	if (!list_empty(&plchan->issued_list)) {
 		plchan->state = PL08X_CHAN_RUNNING;
 		pl08x_start_next_txd(plchan);
 	}
@@ -1658,9 +1672,9 @@ static void pl08x_tasklet(unsigned long data)
 	list_splice_tail_init(&plchan->done_list, &head);
 
 	/* If a new descriptor is queued, set it up plchan->at is NULL here */
-	if (!list_empty(&plchan->pend_list)) {
+	if (!list_empty(&plchan->issued_list)) {
 		pl08x_start_next_txd(plchan);
-	} else if (plchan->phychan_hold) {
+	} else if (!list_empty(&plchan->pend_list) || plchan->phychan_hold) {
 		/*
 		 * This channel is still in use - we have a new txd being
 		 * prepared and will soon be queued.  Don't give up the
@@ -1841,6 +1855,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
+		INIT_LIST_HEAD(&chan->issued_list);
 		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 22/31] dmaengine: PL08x: start next descriptor from irq context
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:53       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:53 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Rather than waiting for the tasklet to run, we can start the next
descriptor from interrupt context, as soon as we know that the
previous descriptor has completed.
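
For orientation, a condensed sketch of the interrupt-path flow this
change produces.  The helper name below is made up for the example (the
real code sits inline in pl08x_irq()), and it assumes the caller already
holds plchan->lock:

	/* Illustrative helper only; in the driver this lives inline in
	 * pl08x_irq(), with plchan->lock held. */
	static void pl08x_complete_and_restart(struct pl08x_dma_chan *plchan)
	{
		struct pl08x_txd *tx = plchan->at;

		if (!tx)
			return;

		/* Retire the active descriptor */
		plchan->at = NULL;
		dma_cookie_complete(&tx->tx);
		list_add_tail(&tx->node, &plchan->done_list);

		/* ...and start the next issued descriptor right away,
		 * rather than waiting for the tasklet to run. */
		if (!list_empty(&plchan->issued_list))
			pl08x_start_next_txd(plchan);
	}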

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b613284..30b6921 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1671,10 +1671,7 @@ static void pl08x_tasklet(unsigned long data)
 	spin_lock_irqsave(&plchan->lock, flags);
 	list_splice_tail_init(&plchan->done_list, &head);
 
-	/* If a new descriptor is queued, set it up plchan->at is NULL here */
-	if (!list_empty(&plchan->issued_list)) {
-		pl08x_start_next_txd(plchan);
-	} else if (!list_empty(&plchan->pend_list) || plchan->phychan_hold) {
+	if (plchan->at || !list_empty(&plchan->pend_list) || plchan->phychan_hold) {
 		/*
 		 * This channel is still in use - we have a new txd being
 		 * prepared and will soon be queued.  Don't give up the
@@ -1786,6 +1783,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				pl08x_release_mux(plchan);
 				dma_cookie_complete(&tx->tx);
 				list_add_tail(&tx->node, &plchan->done_list);
+
+				/* And start the next descriptor */
+				if (!list_empty(&plchan->issued_list))
+					pl08x_start_next_txd(plchan);
 			}
 			spin_unlock(&plchan->lock);
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 23/31] dmaengine: PL08x: rejig physical channel allocation
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:53       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:53 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Rework the physical channel allocation mechanism to only allocate
physical channels to virtual channels when they're about to be used.
This eliminates all the complexity of holding channels while
descriptors are being prepared, which is completely unnecessary.

This also brings this driver to a state where the generic virtual DMA
code can be used with this driver, and opens up the possibility of
properly scheduling and prioritising physical DMA channels to
virtual DMA channels.
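
For reference, the resulting issue_pending() boils down to the
following (taken from the diff below, with comments added):

	static void pl08x_issue_pending(struct dma_chan *chan)
	{
		struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
		unsigned long flags;

		spin_lock_irqsave(&plchan->lock, flags);
		list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
		if (!list_empty(&plchan->issued_list)) {
			/* Only grab a physical channel once work has
			 * actually been issued, and only if we aren't
			 * already queued waiting for one. */
			if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
				pl08x_phy_alloc_and_start(plchan);
		}
		spin_unlock_irqrestore(&plchan->lock, flags);
	}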

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |  268 +++++++++++++++++++---------------------------
 1 files changed, 112 insertions(+), 156 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 30b6921..bbae30c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -210,8 +210,6 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
@@ -230,7 +228,6 @@ enum pl08x_dma_chan_state {
 struct pl08x_dma_chan {
 	struct dma_chan chan;
 	struct pl08x_phy_chan *phychan;
-	int phychan_hold;
 	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
@@ -587,19 +584,111 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	return ch;
 }
 
+/* Mark the physical channel as free.  Note, this write is atomic. */
 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 					 struct pl08x_phy_chan *ch)
 {
-	unsigned long flags;
+	ch->serving = NULL;
+}
 
-	spin_lock_irqsave(&ch->lock, flags);
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_phy_chan *ch;
 
-	/* Stop the channel and clear its interrupts */
-	pl08x_terminate_phy_chan(pl08x, ch);
+	ch = pl08x_get_phy_channel(pl08x, plchan);
+	if (!ch) {
+		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+		plchan->state = PL08X_CHAN_WAITING;
+		return;
+	}
 
-	/* Mark it as free */
-	ch->serving = NULL;
-	spin_unlock_irqrestore(&ch->lock, flags);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+	struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	/*
+	 * We do this without taking the lock; we're really only concerned
+	 * about whether this pointer is NULL or not, and we're guaranteed
+	 * that this will only be called when it _already_ is non-NULL.
+	 */
+	ch->serving = plchan;
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_dma_chan *p, *next;
+
+ retry:
+	next = NULL;
+
+	/* Find a waiting virtual channel for the next transfer. */
+	list_for_each_entry(p, &pl08x->memcpy.channels, chan.device_node)
+		if (p->state == PL08X_CHAN_WAITING) {
+			next = p;
+			break;
+		}
+
+	if (!next) {
+		list_for_each_entry(p, &pl08x->slave.channels, chan.device_node)
+			if (p->state == PL08X_CHAN_WAITING) {
+				next = p;
+				break;
+			}
+	}
+
+	/* Ensure that the physical channel is stopped */
+	pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+	if (next) {
+		bool success;
+
+		/*
+		 * Eww.  We know this isn't going to deadlock
+		 * but lockdep probably doesn't.
+		 */
+		spin_lock(&next->lock);
+		/* Re-check the state now that we have the lock */
+		success = next->state == PL08X_CHAN_WAITING;
+		if (success)
+			pl08x_phy_reassign_start(plchan->phychan, next);
+		spin_unlock(&next->lock);
+
+		/* If the state changed, try to find another channel */
+		if (!success)
+			goto retry;
+	} else {
+		/* No more jobs, so free up the physical channel */
+		pl08x_put_phy_channel(pl08x, plchan->phychan);
+	}
+
+	plchan->phychan = NULL;
+	plchan->state = PL08X_CHAN_IDLE;
 }
 
 /*
@@ -1028,45 +1117,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
 }
 
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_phy_chan *ch;
-
-	/* Check if we already have a channel */
-	if (plchan->phychan) {
-		ch = plchan->phychan;
-		goto got_channel;
-	}
-
-	ch = pl08x_get_phy_channel(pl08x, plchan);
-	if (!ch) {
-		/* No physical channel available, cope with it */
-		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
-		return -EBUSY;
-	}
-
-	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
-		 ch->id, plchan->name);
-
-got_channel:
-	plchan->phychan_hold++;
-
-	return 0;
-}
-
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	pl08x_put_phy_channel(pl08x, plchan->phychan);
-	plchan->phychan = NULL;
-}
-
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
@@ -1079,19 +1129,6 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
-
-	/*
-	 * If there was no physical channel available for this memcpy,
-	 * stack the request up and indicate that the channel is waiting
-	 * for a free physical channel.
-	 */
-	if (!plchan->slave && !plchan->phychan) {
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-	} else {
-		plchan->phychan_hold--;
-	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return cookie;
@@ -1282,19 +1319,10 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 
 	spin_lock_irqsave(&plchan->lock, flags);
 	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
-
-	/* Something is already active, or we're waiting for a channel... */
-	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
-		return;
-	}
-
-	/* Take the first element in the queue and execute it */
 	if (!list_empty(&plchan->issued_list)) {
-		plchan->state = PL08X_CHAN_RUNNING;
-		pl08x_start_next_txd(plchan);
+		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+			pl08x_phy_alloc_and_start(plchan);
 	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 }
 
@@ -1302,48 +1330,18 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 					struct pl08x_txd *txd)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
-	int num_llis, ret;
+	int num_llis;
 
 	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
 	if (!num_llis) {
+		unsigned long flags;
+
 		spin_lock_irqsave(&plchan->lock, flags);
 		pl08x_free_txd(pl08x, txd);
 		spin_unlock_irqrestore(&plchan->lock, flags);
+
 		return -EINVAL;
 	}
-
-	spin_lock_irqsave(&plchan->lock, flags);
-
-	/*
-	 * See if we already have a physical channel allocated,
-	 * else this is the time to try to get one.
-	 */
-	ret = prep_phy_channel(plchan);
-	if (ret) {
-		/*
-		 * No physical channel was available.
-		 *
-		 * memcpy transfers can be sorted out at submission time.
-		 */
-		if (plchan->slave) {
-			pl08x_free_txd_list(pl08x, plchan);
-			pl08x_free_txd(pl08x, txd);
-			spin_unlock_irqrestore(&plchan->lock, flags);
-			return -EBUSY;
-		}
-	} else
-		/*
-		 * Else we're all set, paused and ready to roll, status
-		 * will switch to PL08X_CHAN_RUNNING when we call
-		 * issue_pending(). If there is something running on the
-		 * channel already we don't change its state.
-		 */
-		if (plchan->state == PL08X_CHAN_IDLE)
-			plchan->state = PL08X_CHAN_PAUSED;
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
 	return 0;
 }
 
@@ -1563,14 +1561,11 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
 			/*
 			 * Mark physical channel as free and free any slave
 			 * signal
 			 */
-			release_phy_channel(plchan);
-			plchan->phychan_hold = 0;
+			pl08x_phy_free(plchan);
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
@@ -1670,50 +1665,6 @@ static void pl08x_tasklet(unsigned long data)
 
 	spin_lock_irqsave(&plchan->lock, flags);
 	list_splice_tail_init(&plchan->done_list, &head);
-
-	if (plchan->at || !list_empty(&plchan->pend_list) || plchan->phychan_hold) {
-		/*
-		 * This channel is still in use - we have a new txd being
-		 * prepared and will soon be queued.  Don't give up the
-		 * physical channel.
-		 */
-	} else {
-		struct pl08x_dma_chan *waiting = NULL;
-
-		/*
-		 * No more jobs, so free up the physical channel
-		 */
-		release_phy_channel(plchan);
-		plchan->state = PL08X_CHAN_IDLE;
-
-		/*
-		 * And NOW before anyone else can grab that free:d up
-		 * physical channel, see if there is some memcpy pending
-		 * that seriously needs to start because of being stacked
-		 * up while we were choking the physical channels with data.
-		 */
-		list_for_each_entry(waiting, &pl08x->memcpy.channels,
-				    chan.device_node) {
-			if (waiting->state == PL08X_CHAN_WAITING) {
-				int ret;
-
-				/* This should REALLY not fail now */
-				ret = prep_phy_channel(waiting);
-				BUG_ON(ret);
-				waiting->phychan_hold--;
-				waiting->state = PL08X_CHAN_RUNNING;
-				/*
-				 * Eww.  We know this isn't going to deadlock
-				 * but lockdep probably doens't.
-				 */
-				spin_lock(&waiting->lock);
-				pl08x_start_next_txd(waiting);
-				spin_unlock(&waiting->lock);
-				break;
-			}
-		}
-	}
-
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	while (!list_empty(&head)) {
@@ -1784,9 +1735,14 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				dma_cookie_complete(&tx->tx);
 				list_add_tail(&tx->node, &plchan->done_list);
 
-				/* And start the next descriptor */
+				/*
+				 * And start the next descriptor (if any),
+				 * otherwise free this channel.
+				 */
 				if (!list_empty(&plchan->issued_list))
 					pl08x_start_next_txd(plchan);
+				else
+					pl08x_phy_free(plchan);
 			}
 			spin_unlock(&plchan->lock);
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 24/31] dmaengine: PL08x: convert to use virt-dma structs
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:53       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:53 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Convert PL08x to use the virt-dma structures.
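
The structural shape of the conversion, with driver-specific fields
elided (see the hunks below for the real definitions):

	struct pl08x_txd {
		struct virt_dma_desc vd;	/* embeds dma_async_tx_descriptor as vd.tx */
		/* ... */
	};

	struct pl08x_dma_chan {
		struct virt_dma_chan vc;	/* embeds dma_chan as vc.chan */
		/* ... */
	};

	static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct pl08x_dma_chan, vc.chan);
	}

	static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
	{
		return container_of(tx, struct pl08x_txd, vd.tx);
	}

Everything that previously dereferenced txd->tx or plchan->chan now goes
through vd.tx and vc.chan respectively.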

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   57 +++++++++++++++++++++++----------------------
 1 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bbae30c..9a06428 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,6 +86,7 @@
 #include <asm/hardware/pl080.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 #define DRIVER_NAME	"pl08xdmac"
 
@@ -165,7 +166,7 @@ struct pl08x_sg {
 
 /**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
+ * @vd: virtual DMA descriptor
  * @node: node for txd list for channels
  * @dsg_list: list of children sg's
  * @llis_bus: DMA memory address (physical) start for the LLIs
@@ -174,7 +175,7 @@ struct pl08x_sg {
  * @ccfg: config reg values for current txd
  */
 struct pl08x_txd {
-	struct dma_async_tx_descriptor tx;
+	struct virt_dma_desc vd;
 	struct list_head node;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
@@ -208,7 +209,7 @@ enum pl08x_dma_chan_state {
 
 /**
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
+ * @vc: wrapped virtual channel
  * @phychan: the physical channel utilized by this channel, if there is one
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
@@ -226,7 +227,7 @@ enum pl08x_dma_chan_state {
  * @mux_use: count of descriptors using this DMA request signal setting
  */
 struct pl08x_dma_chan {
-	struct dma_chan chan;
+	struct virt_dma_chan vc;
 	struct pl08x_phy_chan *phychan;
 	struct tasklet_struct tasklet;
 	const char *name;
@@ -287,12 +288,12 @@ struct pl08x_driver_data {
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct pl08x_dma_chan, chan);
+	return container_of(chan, struct pl08x_dma_chan, vc.chan);
 }
 
 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 {
-	return container_of(tx, struct pl08x_txd, tx);
+	return container_of(tx, struct pl08x_txd, vd.tx);
 }
 
 /*
@@ -648,14 +649,14 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 	next = NULL;
 
 	/* Find a waiting virtual channel for the next transfer. */
-	list_for_each_entry(p, &pl08x->memcpy.channels, chan.device_node)
+	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
 		if (p->state == PL08X_CHAN_WAITING) {
 			next = p;
 			break;
 		}
 
 	if (!next) {
-		list_for_each_entry(p, &pl08x->slave.channels, chan.device_node)
+		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
 			if (p->state == PL08X_CHAN_WAITING) {
 				next = p;
 				break;
@@ -1351,9 +1352,9 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
-		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
-		txd->tx.flags = flags;
-		txd->tx.tx_submit = pl08x_tx_submit;
+		dma_async_tx_descriptor_init(&txd->vd.tx, &plchan->vc.chan);
+		txd->vd.tx.flags = flags;
+		txd->vd.tx.tx_submit = pl08x_tx_submit;
 		INIT_LIST_HEAD(&txd->node);
 		INIT_LIST_HEAD(&txd->dsg_list);
 
@@ -1413,7 +1414,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	if (ret)
 		return NULL;
 
-	return &txd->tx;
+	return &txd->vd.tx;
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1529,7 +1530,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	if (ret)
 		return NULL;
 
-	return &txd->tx;
+	return &txd->vd.tx;
 }
 
 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1630,11 +1631,11 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 
 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
-	struct device *dev = txd->tx.chan->device->dev;
+	struct device *dev = txd->vd.tx.chan->device->dev;
 	struct pl08x_sg *dsg;
 
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				dma_unmap_single(dev, dsg->src_addr, dsg->len,
 						DMA_TO_DEVICE);
@@ -1644,8 +1645,8 @@ static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 						DMA_TO_DEVICE);
 		}
 	}
-	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
 						DMA_FROM_DEVICE);
@@ -1670,8 +1671,8 @@ static void pl08x_tasklet(unsigned long data)
 	while (!list_empty(&head)) {
 		struct pl08x_txd *txd = list_first_entry(&head,
 						struct pl08x_txd, node);
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
+		dma_async_tx_callback callback = txd->vd.tx.callback;
+		void *callback_param = txd->vd.tx.callback_param;
 
 		list_del(&txd->node);
 
@@ -1732,7 +1733,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				 * reservation.
 				 */
 				pl08x_release_mux(plchan);
-				dma_cookie_complete(&tx->tx);
+				dma_cookie_complete(&tx->vd.tx);
 				list_add_tail(&tx->node, &plchan->done_list);
 
 				/*
@@ -1807,8 +1808,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		chan->chan.device = dmadev;
-		dma_cookie_init(&chan->chan);
+		chan->vc.chan.device = dmadev;
+		dma_cookie_init(&chan->vc.chan);
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
@@ -1817,7 +1818,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
-		list_add_tail(&chan->chan.device_node, &dmadev->channels);
+		list_add_tail(&chan->vc.chan.device_node, &dmadev->channels);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
 		 i, slave ? "slave" : "memcpy");
@@ -1830,8 +1831,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
 	struct pl08x_dma_chan *next;
 
 	list_for_each_entry_safe(chan,
-				 next, &dmadev->channels, chan.device_node) {
-		list_del(&chan->chan.device_node);
+				 next, &dmadev->channels, vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
 		kfree(chan);
 	}
 }
@@ -1884,7 +1885,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
-	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
 		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
@@ -1892,7 +1893,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 	seq_printf(s, "\nPL08x virtual slave channels:\n");
 	seq_printf(s, "CHANNEL:\tSTATE:\n");
 	seq_printf(s, "--------\t------\n");
-	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
 		seq_printf(s, "%s\t\t%s\n", chan->name,
 			   pl08x_state_str(chan->state));
 	}
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 25/31] dmaengine: PL08x: use vchan's spinlock
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:54       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:54 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Initialize the vchan struct, and use the provided spinlock rather than
our own.
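
Condensed from the hunks below, per-channel setup then becomes (sketch;
tasklet and per-channel list initialisation are unchanged):

	INIT_LIST_HEAD(&chan->pend_list);
	INIT_LIST_HEAD(&chan->issued_list);
	INIT_LIST_HEAD(&chan->done_list);
	tasklet_init(&chan->tasklet, pl08x_tasklet, (unsigned long) chan);

	/* Replaces the open-coded dma_cookie_init(), spin_lock_init() and
	 * list_add_tail(): vchan_init() sets up vc.chan, vc.lock and adds
	 * the channel to dmadev->channels. */
	vchan_init(&chan->vc, dmadev);

All locking in the driver then uses plchan->vc.lock in place of the old
private plchan->lock.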

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/Kconfig      |    1 +
 drivers/dma/amba-pl08x.c |   45 ++++++++++++++++++++-------------------------
 2 files changed, 21 insertions(+), 25 deletions(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eb2b60e..be0dc3b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA && EXPERIMENTAL
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9a06428..398a5da 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -237,7 +237,6 @@ struct pl08x_dma_chan {
 	struct list_head issued_list;
 	struct list_head done_list;
 	struct pl08x_txd *at;
-	spinlock_t lock;
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
@@ -484,7 +483,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	unsigned long flags;
 	size_t bytes = 0;
 
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	ch = plchan->phychan;
 	txd = plchan->at;
 
@@ -543,7 +542,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 		}
 	}
 
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 	return bytes;
 }
@@ -673,12 +672,12 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 		 * Eww.  We know this isn't going to deadlock
 		 * but lockdep probably doesn't.
 		 */
-		spin_lock(&next->lock);
+		spin_lock(&next->vc.lock);
 		/* Re-check the state now that we have the lock */
 		success = next->state == PL08X_CHAN_WAITING;
 		if (success)
 			pl08x_phy_reassign_start(plchan->phychan, next);
-		spin_unlock(&next->lock);
+		spin_unlock(&next->vc.lock);
 
 		/* If the state changed, try to find another channel */
 		if (!success)
@@ -1125,12 +1124,12 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	dma_cookie_t cookie;
 
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	cookie = dma_cookie_assign(tx);
 
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 	return cookie;
 }
@@ -1318,13 +1317,13 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
 	if (!list_empty(&plchan->issued_list)) {
 		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
 			pl08x_phy_alloc_and_start(plchan);
 	}
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 }
 
 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
@@ -1337,9 +1336,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	if (!num_llis) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&plchan->lock, flags);
+		spin_lock_irqsave(&plchan->vc.lock, flags);
 		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 		return -EINVAL;
 	}
@@ -1551,9 +1550,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	 * Anything succeeds on channels with no physical allocation and
 	 * no queued transfers.
 	 */
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	if (!plchan->phychan && !plchan->at) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 		return 0;
 	}
 
@@ -1592,7 +1591,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		break;
 	}
 
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 	return ret;
 }
@@ -1664,9 +1663,9 @@ static void pl08x_tasklet(unsigned long data)
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	list_splice_tail_init(&plchan->done_list, &head);
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 	while (!list_empty(&head)) {
 		struct pl08x_txd *txd = list_first_entry(&head,
@@ -1681,9 +1680,9 @@ static void pl08x_tasklet(unsigned long data)
 			pl08x_unmap_buffers(txd);
 
 		/* Free the descriptor */
-		spin_lock_irqsave(&plchan->lock, flags);
+		spin_lock_irqsave(&plchan->vc.lock, flags);
 		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 
 		/* Callback to signal completion */
 		if (callback)
@@ -1724,7 +1723,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				continue;
 			}
 
-			spin_lock(&plchan->lock);
+			spin_lock(&plchan->vc.lock);
 			tx = plchan->at;
 			if (tx) {
 				plchan->at = NULL;
@@ -1745,7 +1744,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				else
 					pl08x_phy_free(plchan);
 			}
-			spin_unlock(&plchan->lock);
+			spin_unlock(&plchan->vc.lock);
 
 			/* Schedule tasklet on this channel */
 			tasklet_schedule(&plchan->tasklet);
@@ -1808,17 +1807,13 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		chan->vc.chan.device = dmadev;
-		dma_cookie_init(&chan->vc.chan);
-
-		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
 		INIT_LIST_HEAD(&chan->issued_list);
 		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
 
-		list_add_tail(&chan->vc.chan.device_node, &dmadev->channels);
+		vchan_init(&chan->vc, dmadev);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
 		 i, slave ? "slave" : "memcpy");
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 26/31] dmaengine: PL08x: convert to use vchan submitted/issued lists
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:54       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:54 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Convert to use the virtual dma channel submitted/issued descriptor
lists rather than our own private lists, and use the virtual dma
channel support functions to manage these lists.
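
For illustration only (this is not part of the patch; the "foo" names are
invented, while the vchan_* helpers are the real ones from virt-dma.h), a
driver built on these lists ends up with an issue_pending roughly like:

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

struct foo_chan {
	struct virt_dma_chan vc;	/* provides vc.lock and the desc lists */
	struct virt_dma_desc *at;	/* currently active descriptor, if any */
};

static void foo_issue_pending(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, vc.chan);
	unsigned long flags;

	spin_lock_irqsave(&fc->vc.lock, flags);
	/* splices vc.desc_submitted onto vc.desc_issued; true if work queued */
	if (vchan_issue_pending(&fc->vc) && !fc->at) {
		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);

		list_del(&vd->node);
		fc->at = vd;
		/* a real driver would program the hardware from vd here */
	}
	spin_unlock_irqrestore(&fc->vc.lock, flags);
}

The prep routines then just return vchan_tx_prep(&fc->vc, &txd->vd, flags),
so the descriptor lands on desc_submitted when the client calls tx_submit(),
exactly as pl08x does after this patch.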

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   64 ++++++++++++---------------------------------
 1 files changed, 17 insertions(+), 47 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 398a5da..5333a91 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -215,8 +215,6 @@ enum pl08x_dma_chan_state {
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @pend_list: queued transactions pending on this channel
- * @issued_list: issued transactions for this channel
  * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -233,8 +231,6 @@ struct pl08x_dma_chan {
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	struct list_head pend_list;
-	struct list_head issued_list;
 	struct list_head done_list;
 	struct pl08x_txd *at;
 	struct pl08x_driver_data *host;
@@ -357,12 +353,12 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
+	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_lli *lli;
-	struct pl08x_txd *txd;
 	u32 val;
 
-	txd = list_first_entry(&plchan->issued_list, struct pl08x_txd, node);
-	list_del(&txd->node);
+	list_del(&txd->vd.node);
 
 	plchan->at = txd;
 
@@ -524,18 +520,18 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->issued_list)) {
+	if (!list_empty(&plchan->vc.desc_issued)) {
 		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->issued_list, node) {
+		list_for_each_entry(txdi, &plchan->vc.desc_issued, vd.node) {
 			struct pl08x_sg *dsg;
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				bytes += dsg->len;
 		}
 	}
 
-	if (!list_empty(&plchan->pend_list)) {
+	if (!list_empty(&plchan->vc.desc_submitted)) {
 		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->pend_list, node) {
+		list_for_each_entry(txdi, &plchan->vc.desc_submitted, vd.node) {
 			struct pl08x_sg *dsg;
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				bytes += dsg->len;
@@ -1094,13 +1090,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	LIST_HEAD(head);
 	struct pl08x_txd *txd;
 
-	list_splice_tail_init(&plchan->issued_list, &head);
-	list_splice_tail_init(&plchan->pend_list, &head);
+	vchan_get_all_descriptors(&plchan->vc, &head);
 
 	while (!list_empty(&head)) {
-		txd = list_first_entry(&head, struct pl08x_txd, node);
+		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
 		pl08x_release_mux(plchan);
-		list_del(&txd->node);
+		list_del(&txd->vd.node);
 		pl08x_free_txd(pl08x, txd);
 	}
 }
@@ -1117,23 +1112,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
 }
 
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
-	struct pl08x_txd *txd = to_pl08x_txd(tx);
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&plchan->vc.lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/* Put this onto the pending list */
-	list_add_tail(&txd->node, &plchan->pend_list);
-	spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-	return cookie;
-}
-
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 		struct dma_chan *chan, unsigned long flags)
 {
@@ -1318,8 +1296,7 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	spin_lock_irqsave(&plchan->vc.lock, flags);
-	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
-	if (!list_empty(&plchan->issued_list)) {
+	if (vchan_issue_pending(&plchan->vc)) {
 		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
 			pl08x_phy_alloc_and_start(plchan);
 	}
@@ -1345,16 +1322,11 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	return 0;
 }
 
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
-	unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
 
 	if (txd) {
-		dma_async_tx_descriptor_init(&txd->vd.tx, &plchan->vc.chan);
-		txd->vd.tx.flags = flags;
-		txd->vd.tx.tx_submit = pl08x_tx_submit;
-		INIT_LIST_HEAD(&txd->node);
 		INIT_LIST_HEAD(&txd->dsg_list);
 
 		/* Always enable error and terminal interrupts */
@@ -1377,7 +1349,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_sg *dsg;
 	int ret;
 
-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev,
 			"%s no memory for descriptor\n", __func__);
@@ -1413,7 +1385,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	if (ret)
 		return NULL;
 
-	return &txd->vd.tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1435,7 +1407,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 			__func__, sg_dma_len(sgl), plchan->name);
 
-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
 		return NULL;
@@ -1529,7 +1501,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	if (ret)
 		return NULL;
 
-	return &txd->vd.tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
 
 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1739,7 +1711,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				 * And start the next descriptor (if any),
 				 * otherwise free this channel.
 				 */
-				if (!list_empty(&plchan->issued_list))
+				if (vchan_next_desc(&plchan->vc))
 					pl08x_start_next_txd(plchan);
 				else
 					pl08x_phy_free(plchan);
@@ -1807,8 +1779,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		INIT_LIST_HEAD(&chan->pend_list);
-		INIT_LIST_HEAD(&chan->issued_list);
 		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 27/31] dmaengine: PL08x: convert to use vchan done list
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:54       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:54 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Convert to use the virtual dma channel done list, tasklet, and
descriptor freeing.
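
As a rough sketch (not part of the patch; "foo" names are invented, and
struct foo_chan plus the usual <linux/interrupt.h>/<linux/slab.h> includes
are assumed as in the sketch for the previous patch), the completion path
with the vchan core comes down to a desc_free callback plus a much smaller
interrupt handler:

struct foo_desc {
	struct virt_dma_desc vd;
	/* driver-specific state (LLIs, lengths, ...) would live here */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	/* run from the vchan tasklet, after the client callback */
	kfree(container_of(vd, struct foo_desc, vd));
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_chan *fc = data;

	spin_lock(&fc->vc.lock);
	if (fc->at) {
		/* marks the cookie complete, moves the descriptor to
		 * vc.desc_completed and schedules the vchan tasklet */
		vchan_cookie_complete(fc->at);
		fc->at = NULL;
	}
	spin_unlock(&fc->vc.lock);
	return IRQ_HANDLED;
}

At init time the driver sets fc->vc.desc_free = foo_desc_free before calling
vchan_init(), which is what this patch does with pl08x_desc_free().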

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |  135 ++++++++++++++++++---------------------------
 1 files changed, 54 insertions(+), 81 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5333a91..6a35e37 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -167,16 +167,16 @@ struct pl08x_sg {
 /**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
  * @vd: virtual DMA descriptor
- * @node: node for txd list for channels
  * @dsg_list: list of children sg's
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
  * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ *   mux released.
  */
 struct pl08x_txd {
 	struct virt_dma_desc vd;
-	struct list_head node;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
@@ -187,6 +187,7 @@ struct pl08x_txd {
 	 * trigger this txd.  Other registers are in llis_va[0].
 	 */
 	u32 ccfg;
+	bool done;
 };
 
 /**
@@ -211,11 +212,9 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @vc: wrappped virtual channel
  * @phychan: the physical channel utilized by this channel, if there is one
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
@@ -227,11 +226,9 @@ enum pl08x_dma_chan_state {
 struct pl08x_dma_chan {
 	struct virt_dma_chan vc;
 	struct pl08x_phy_chan *phychan;
-	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	struct list_head done_list;
 	struct pl08x_txd *at;
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
@@ -1084,6 +1081,52 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	kfree(txd);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+	struct device *dev = txd->vd.tx.chan->device->dev;
+	struct pl08x_sg *dsg;
+
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
+	}
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+		else
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+	}
+}
+
+static void pl08x_desc_free(struct virt_dma_desc *vd)
+{
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	unsigned long flags;
+
+	if (!plchan->slave)
+		pl08x_unmap_buffers(txd);
+
+	if (!txd->done)
+		pl08x_release_mux(plchan);
+
+	spin_lock_irqsave(&pl08x->lock, flags);
+	pl08x_free_txd(plchan->host, txd);
+	spin_unlock_irqrestore(&pl08x->lock, flags);
+}
+
 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 				struct pl08x_dma_chan *plchan)
 {
@@ -1094,9 +1137,8 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 
 	while (!list_empty(&head)) {
 		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
-		pl08x_release_mux(plchan);
 		list_del(&txd->vd.node);
-		pl08x_free_txd(pl08x, txd);
+		pl08x_desc_free(&txd->vd);
 	}
 }
 
@@ -1541,9 +1583,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
-			/* Killing this one off, release its mux */
-			pl08x_release_mux(plchan);
-			pl08x_free_txd(pl08x, plchan->at);
+			pl08x_desc_free(&plchan->at->vd);
 			plchan->at = NULL;
 		}
 		/* Dequeue jobs not yet fired as well */
@@ -1600,68 +1640,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
-	struct device *dev = txd->vd.tx.chan->device->dev;
-	struct pl08x_sg *dsg;
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
-	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock_irqsave(&plchan->vc.lock, flags);
-	list_splice_tail_init(&plchan->done_list, &head);
-	spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-	while (!list_empty(&head)) {
-		struct pl08x_txd *txd = list_first_entry(&head,
-						struct pl08x_txd, node);
-		dma_async_tx_callback callback = txd->vd.tx.callback;
-		void *callback_param = txd->vd.tx.callback_param;
-
-		list_del(&txd->node);
-
-		/* Don't try to unmap buffers on slave channels */
-		if (!plchan->slave)
-			pl08x_unmap_buffers(txd);
-
-		/* Free the descriptor */
-		spin_lock_irqsave(&plchan->vc.lock, flags);
-		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-		/* Callback to signal completion */
-		if (callback)
-			callback(callback_param);
-	}
-}
-
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
@@ -1704,8 +1682,8 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 				 * reservation.
 				 */
 				pl08x_release_mux(plchan);
-				dma_cookie_complete(&tx->vd.tx);
-				list_add_tail(&tx->node, &plchan->done_list);
+				tx->done = true;
+				vchan_cookie_complete(&tx->vd);
 
 				/*
 				 * And start the next descriptor (if any),
@@ -1718,8 +1696,6 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			}
 			spin_unlock(&plchan->vc.lock);
 
-			/* Schedule tasklet on this channel */
-			tasklet_schedule(&plchan->tasklet);
 			mask |= (1 << i);
 		}
 	}
@@ -1779,10 +1755,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		INIT_LIST_HEAD(&chan->done_list);
-		tasklet_init(&chan->tasklet, pl08x_tasklet,
-			     (unsigned long) chan);
-
+		chan->vc.desc_free = pl08x_desc_free;
 		vchan_init(&chan->vc, dmadev);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 28/31] dmaengine: PL08x: fix tx_status function to return correct residue
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:55       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:55 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Now that we've converted to use the generic vchan support, we can fix
the residue return from tx_status to be compliant with dmaengine.  This
returns the number of bytes remaining for the _specified_ cookie, not
the number of bytes in all pending transfers on the channel.
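
A minimal sketch of the resulting tx_status shape (not the patch itself;
the foo_* helpers are hypothetical, and dma_cookie_status()/dma_set_residue()
come from drivers/dma/dmaengine.h):

/* hypothetical driver helpers, assumed to exist elsewhere in the driver */
static size_t foo_desc_bytes(struct virt_dma_desc *vd);
static size_t foo_read_hw_residue(struct foo_chan *fc);

static enum dma_status foo_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, vc.chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&fc->vc.lock, flags);
	vd = vchan_find_desc(&fc->vc, cookie);
	if (vd)
		bytes = foo_desc_bytes(vd);	/* still queued: full length */
	else if (fc->at && fc->at->tx.cookie == cookie)
		bytes = foo_read_hw_residue(fc);	/* active: read the hardware */
	spin_unlock_irqrestore(&fc->vc.lock, flags);

	dma_set_residue(txstate, bytes);
	return ret;
}

The point is that the residue is looked up for the one cookie the caller
asked about, rather than summed over everything queued on the channel.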

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   61 +++++++++++++++++++++++++--------------------
 1 files changed, 34 insertions(+), 27 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 6a35e37..c42c7ef 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -473,10 +473,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
 	struct pl08x_txd *txd;
-	unsigned long flags;
 	size_t bytes = 0;
 
-	spin_lock_irqsave(&plchan->vc.lock, flags);
 	ch = plchan->phychan;
 	txd = plchan->at;
 
@@ -516,27 +514,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 		}
 	}
 
-	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->vc.desc_issued)) {
-		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->vc.desc_issued, vd.node) {
-			struct pl08x_sg *dsg;
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				bytes += dsg->len;
-		}
-	}
-
-	if (!list_empty(&plchan->vc.desc_submitted)) {
-		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->vc.desc_submitted, vd.node) {
-			struct pl08x_sg *dsg;
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				bytes += dsg->len;
-		}
-	}
-
-	spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
 	return bytes;
 }
 
@@ -1171,23 +1148,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct virt_dma_desc *vd;
+	unsigned long flags;
 	enum dma_status ret;
+	size_t bytes = 0;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 
 	/*
+	 * There's no point calculating the residue if there's
+	 * no txstate to store the value.
+	 */
+	if (!txstate) {
+		if (plchan->state == PL08X_CHAN_PAUSED)
+			ret = DMA_PAUSED;
+		return ret;
+	}
+
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS) {
+		vd = vchan_find_desc(&plchan->vc, cookie);
+		if (vd) {
+			/* On the issued list, so hasn't been processed yet */
+			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+			struct pl08x_sg *dsg;
+
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				bytes += dsg->len;
+		} else {
+			bytes = pl08x_getbytes_chan(plchan);
+		}
+	}
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	/*
 	 * This cookie not complete yet
 	 * Get number of bytes left in the active transactions and queue
 	 */
-	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+	dma_set_residue(txstate, bytes);
 
-	if (plchan->state == PL08X_CHAN_PAUSED)
-		return DMA_PAUSED;
+	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+		ret = DMA_PAUSED;
 
 	/* Whether waiting or running, we're in progress */
-	return DMA_IN_PROGRESS;
+	return ret;
 }
 
 /* PrimeCell DMA extension */
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 29/31] dmaengine: PL08x: get rid of pl08x_prep_channel_resources
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:55       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:55 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

This function is now unnecessary; we can move its internals inline
instead.
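
Illustratively (not the patch; the foo_* names are hypothetical and struct
foo_desc is as in the earlier sketch), a prep routine then takes this shape,
unwinding on the spot instead of calling a helper:

static struct foo_desc *foo_get_desc(struct foo_chan *fc);
static int foo_fill_llis(struct foo_chan *fc, struct foo_desc *d,
		dma_addr_t dst, dma_addr_t src, size_t len);
static void foo_free_desc(struct foo_chan *fc, struct foo_desc *d);

static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, vc.chan);
	struct foo_desc *d = foo_get_desc(fc);

	if (!d)
		return NULL;

	if (!foo_fill_llis(fc, d, dst, src, len)) {	/* 0 means failure */
		foo_free_desc(fc, d);
		return NULL;
	}

	return vchan_tx_prep(&fc->vc, &d->vd, flags);
}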

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   32 +++++++++-----------------------
 1 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c42c7ef..9297240 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1352,25 +1352,6 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 }
 
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
-					struct pl08x_txd *txd)
-{
-	struct pl08x_driver_data *pl08x = plchan->host;
-	int num_llis;
-
-	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
-	if (!num_llis) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&plchan->vc.lock, flags);
-		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-		return -EINVAL;
-	}
-	return 0;
-}
-
 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
@@ -1430,9 +1411,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
 					      pl08x->mem_buses);
 
-	ret = pl08x_prep_channel_resources(plchan, txd);
-	if (ret)
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_free_txd(pl08x, txd);
 		return NULL;
+	}
 
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
@@ -1546,9 +1529,12 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		}
 	}
 
-	ret = pl08x_prep_channel_resources(plchan, txd);
-	if (ret)
+	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+	if (!ret) {
+		pl08x_release_mux(plchan);
+		pl08x_free_txd(pl08x, txd);
 		return NULL;
+	}
 
 	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 30/31] dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:55       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:55 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

The free function says the pl08x lock should be taken before calling
it.  However, the DMA pool allocation/freeing is already properly
locked.  The only thing that would need this is pool_ctr, which
happens to be a write-only variable.

Let's get rid of this, and eliminate any need for additional locking
here.
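
For reference (a sketch, not part of the patch; needs <linux/dmapool.h>):
dma_pool_alloc() and dma_pool_free() serialise on the pool's own internal
lock, so a free path like the one below needs no extra driver lock just to
touch the pool:

static void example_free_llis(struct dma_pool *pool, void *llis_va,
		dma_addr_t llis_bus)
{
	if (llis_va)
		dma_pool_free(pool, llis_va, llis_bus);
}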

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |   12 ------------
 1 files changed, 0 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9297240..a5d85b1 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -247,7 +247,6 @@ struct pl08x_dma_chan {
  * @pd: platform data passed in from the platform/machine
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
  * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
@@ -262,7 +261,6 @@ struct pl08x_driver_data {
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
-	int pool_ctr;
 	u8 lli_buses;
 	u8 mem_buses;
 };
@@ -821,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		return 0;
 	}
 
-	pl08x->pool_ctr++;
-
 	bd.txd = txd;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
 	cctl = txd->cctl;
@@ -1038,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	return num_llis;
 }
 
-/* You should call this with the struct pl08x lock held */
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
 	struct pl08x_sg *dsg, *_dsg;
 
-	/* Free the LLI */
 	if (txd->llis_va)
 		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
-	pl08x->pool_ctr--;
-
 	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
 		list_del(&dsg->node);
 		kfree(dsg);
@@ -1090,8 +1082,6 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 {
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
-	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
 
 	if (!plchan->slave)
 		pl08x_unmap_buffers(txd);
@@ -1099,9 +1089,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 	if (!txd->done)
 		pl08x_release_mux(plchan);
 
-	spin_lock_irqsave(&pl08x->lock, flags);
 	pl08x_free_txd(plchan->host, txd);
-	spin_unlock_irqrestore(&pl08x->lock, flags);
 }
 
 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 31/31] dmaengine: PL08x: ensure all descriptors are freed when channel is released
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-07 10:56       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 10:56 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Ensure all queued descriptors are freed when the channel is released, so
that we don't leak memory.
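
For reference, vchan_free_chan_resources() splices desc_submitted,
desc_issued and desc_completed off the channel and frees every descriptor
through the channel's desc_free() callback, so a driver's own hook can be
as small as the (illustrative) one below:

static void foo_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}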

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/amba-pl08x.c |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a5d85b1..6fbeebb 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1117,6 +1117,8 @@ static int pl08x_alloc_chan_resources(struct dma_chan *chan)
 
 static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
+	/* Ensure all queued descriptors are freed */
+	vchan_free_chan_resources(to_virt_chan(chan));
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT] OMAP patches
  2012-06-07 10:41   ` Russell King
@ 2012-06-07 11:06     ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-07 11:06 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Vinod Koul, Dan Williams

And the last set - the OMAP patches.

 Documentation/feature-removal-schedule.txt |   11 +
 arch/arm/mach-omap1/board-h2-mmc.c         |    1 -
 arch/arm/mach-omap1/board-h3-mmc.c         |    1 -
 arch/arm/mach-omap1/board-nokia770.c       |    1 -
 arch/arm/mach-omap2/board-n8x0.c           |    1 -
 arch/arm/mach-omap2/hsmmc.c                |    1 -
 arch/arm/plat-omap/include/plat/mmc.h      |    2 -
 drivers/dma/Kconfig                        |    6 +
 drivers/dma/Makefile                       |    1 +
 drivers/dma/omap-dma.c                     |  522 ++++++++++++++++++++++++++++
 drivers/mmc/host/omap.c                    |  368 +++++++++-----------
 drivers/mmc/host/omap_hsmmc.c              |  202 ++++++------
 drivers/mtd/nand/omap2.c                   |  106 +++---
 drivers/spi/spi-omap2-mcspi.c              |  229 +++++++------
 include/linux/omap-dma.h                   |   24 ++
 15 files changed, 1010 insertions(+), 466 deletions(-)


^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:06       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:06 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Dan Williams, Vinod Koul

Tested-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/dma/Kconfig      |    6 +
 drivers/dma/Makefile     |    1 +
 drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/omap-dma.h |   24 ++
 4 files changed, 553 insertions(+), 0 deletions(-)
 create mode 100644 drivers/dma/omap-dma.c
 create mode 100644 include/linux/omap-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eb2b60e..8be3bf6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -261,6 +261,12 @@ config DMA_SA11X0
 	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
 	  devices.
 
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index fc05f7d..ddc291a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 0000000..500bc71
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,522 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include <plat/dma.h>
+
+struct omap_dmadev {
+	struct dma_device ddev;
+	spinlock_t lock;
+	struct tasklet_struct task;
+	struct list_head pending;
+};
+
+struct omap_chan {
+	struct virt_dma_chan vc;
+	struct list_head node;
+
+	struct dma_slave_config	cfg;
+	unsigned dma_sig;
+
+	int dma_ch;
+	struct omap_desc *desc;
+	unsigned sgidx;
+};
+
+struct omap_sg {
+	dma_addr_t addr;
+	uint32_t en;		/* number of elements (24-bit) */
+	uint32_t fn;		/* number of frames (16-bit) */
+};
+
+struct omap_desc {
+	struct virt_dma_desc vd;
+	enum dma_transfer_direction dir;
+	dma_addr_t dev_addr;
+
+	uint8_t es;		/* element size */
+	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
+	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
+	uint8_t periph_port;	/* Peripheral port */
+
+	unsigned sglen;
+	struct omap_sg sg[0];
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+	return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct omap_desc, vd));
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+	unsigned idx)
+{
+	struct omap_sg *sg = d->sg + idx;
+
+	if (d->dir == DMA_DEV_TO_MEM)
+		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+	else
+		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+
+	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
+		d->sync_mode, c->dma_sig, d->sync_type);
+
+	omap_start_dma(c->dma_ch);
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+	struct omap_desc *d;
+
+	if (!vd) {
+		c->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	c->desc = d = to_omap_dma_desc(&vd->tx);
+	c->sgidx = 0;
+
+	if (d->dir == DMA_DEV_TO_MEM)
+		omap_set_dma_src_params(c->dma_ch, d->periph_port,
+			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, 0);
+	else
+		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
+			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, 0);
+
+	omap_dma_start_sg(c, d, 0);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+	struct omap_chan *c = data;
+	struct omap_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	d = c->desc;
+	if (d) {
+		/* Start the next sg entry, or complete this descriptor. */
+		if (++c->sgidx < d->sglen) {
+			omap_dma_start_sg(c, d, c->sgidx);
+		} else {
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+/*
+ * This callback schedules all pending channels.  We could be more
+ * clever here by postponing allocation of the real DMA channels to
+ * this point, and freeing them when our virtual channel becomes idle.
+ *
+ * We would then need to deal with the case where all channels are in use.
+ */
+static void omap_dma_sched(unsigned long data)
+{
+	struct omap_dmadev *d = (struct omap_dmadev *)data;
+	LIST_HEAD(head);
+
+	spin_lock_irq(&d->lock);
+	list_splice_tail_init(&d->pending, &head);
+	spin_unlock_irq(&d->lock);
+
+	while (!list_empty(&head)) {
+		struct omap_chan *c = list_first_entry(&head,
+			struct omap_chan, node);
+
+		spin_lock_irq(&c->vc.lock);
+		list_del_init(&c->node);
+		omap_dma_start_desc(c);
+		spin_unlock_irq(&c->vc.lock);
+	}
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
+	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+
+	return omap_request_dma(c->dma_sig, "DMA engine",
+		omap_dma_callback, c, &c->dma_ch);
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
+	vchan_free_chan_resources(&c->vc);
+	omap_free_dma(c->dma_ch);
+
+	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	/*
+	 * FIXME: do we need to return pending bytes?
+	 * We have no users of that info at the moment...
+	 */
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc) && !c->desc) {
+		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+		spin_lock(&d->lock);
+		if (list_empty(&c->node))
+			list_add_tail(&c->node, &d->pending);
+		spin_unlock(&d->lock);
+		tasklet_schedule(&d->task);
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct scatterlist *sgent;
+	struct omap_desc *d;
+	dma_addr_t dev_addr;
+	unsigned i, j = 0, es, es_bytes, en, frame_bytes, sync_type;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		burst = c->cfg.src_maxburst;
+		sync_type = OMAP_DMA_SRC_SYNC;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		burst = c->cfg.dst_maxburst;
+		sync_type = OMAP_DMA_DST_SYNC;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = OMAP_DMA_DATA_TYPE_S8;
+		es_bytes = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S16;
+		es_bytes = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S32;
+		es_bytes = 4;
+		break;
+	default: /* not reached */
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+	d->sync_mode = OMAP_DMA_SYNC_FRAME;
+	d->sync_type = sync_type;
+	d->periph_port = OMAP_DMA_PORT_TIPB;
+
+	/*
+	 * Build our scatterlist entries: each contains the address,
+	 * the number of elements (EN) in each frame, and the number of
+	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
+	 *
+	 * Burst size translates to number of elements with frame sync.
+	 * Note: DMA engine defines burst to be the number of dev-width
+	 * transfers.
+	 */
+	en = burst;
+	frame_bytes = es_bytes * en;
+	for_each_sg(sgl, sgent, sglen, i) {
+		d->sg[j].addr = sg_dma_address(sgent);
+		d->sg[j].en = en;
+		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
+		j++;
+	}
+
+	d->sglen = j;
+
+	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+{
+	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+	return 0;
+}
+
+static int omap_dma_terminate_all(struct omap_chan *c)
+{
+	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after omap_stop_dma() returns (even if it does, it will see
+	 * c->desc is NULL and exit.)
+	 */
+	if (c->desc) {
+		c->desc = NULL;
+		omap_stop_dma(c->dma_ch);
+	}
+
+	vchan_get_all_descriptors(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int omap_dma_pause(struct omap_chan *c)
+{
+	/* FIXME: not supported by platform private API */
+	return -EINVAL;
+}
+
+static int omap_dma_resume(struct omap_chan *c)
+{
+	/* FIXME: not supported by platform private API */
+	return -EINVAL;
+}
+
+static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	int ret;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+		break;
+
+	case DMA_TERMINATE_ALL:
+		ret = omap_dma_terminate_all(c);
+		break;
+
+	case DMA_PAUSE:
+		ret = omap_dma_pause(c);
+		break;
+
+	case DMA_RESUME:
+		ret = omap_dma_resume(c);
+		break;
+
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
+{
+	struct omap_chan *c;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	c->dma_sig = dma_sig;
+	c->vc.desc_free = omap_dma_desc_free;
+	vchan_init(&c->vc, &od->ddev);
+	INIT_LIST_HEAD(&c->node);
+
+	od->ddev.chancnt++;
+
+	return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+	tasklet_kill(&od->task);
+	while (!list_empty(&od->ddev.channels)) {
+		struct omap_chan *c = list_first_entry(&od->ddev.channels,
+			struct omap_chan, vc.chan.device_node);
+
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+		kfree(c);
+	}
+	kfree(od);
+}
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+	struct omap_dmadev *od;
+	int rc, i;
+
+	od = kzalloc(sizeof(*od), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+	od->ddev.device_tx_status = omap_dma_tx_status;
+	od->ddev.device_issue_pending = omap_dma_issue_pending;
+	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+	od->ddev.device_control = omap_dma_control;
+	od->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&od->ddev.channels);
+	INIT_LIST_HEAD(&od->pending);
+	spin_lock_init(&od->lock);
+
+	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
+
+	for (i = 0; i < 127; i++) {
+		rc = omap_dma_chan_init(od, i);
+		if (rc) {
+			omap_dma_free(od);
+			return rc;
+		}
+	}
+
+	rc = dma_async_device_register(&od->ddev);
+	if (rc) {
+		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+			rc);
+		omap_dma_free(od);
+	} else {
+		platform_set_drvdata(pdev, od);
+	}
+
+	dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+
+	return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+	struct omap_dmadev *od = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&od->ddev);
+	omap_dma_free(od);
+
+	return 0;
+}
+
+static struct platform_driver omap_dma_driver = {
+	.probe	= omap_dma_probe,
+	.remove	= omap_dma_remove,
+	.driver = {
+		.name = "omap-dma-engine",
+		.owner = THIS_MODULE,
+	},
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &omap_dma_driver.driver) {
+		struct omap_chan *c = to_omap_dma_chan(chan);
+		unsigned req = *(unsigned *)param;
+
+		return req == c->dma_sig;
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
+
+static struct platform_device *pdev;
+
+static const struct platform_device_info omap_dma_dev_info = {
+	.name = "omap-dma-engine",
+	.id = -1,
+	.dma_mask = DMA_BIT_MASK(32),
+};
+
+static int omap_dma_init(void)
+{
+	int rc = platform_driver_register(&omap_dma_driver);
+
+	if (rc == 0) {
+		pdev = platform_device_register_full(&omap_dma_dev_info);
+		if (IS_ERR(pdev)) {
+			platform_driver_unregister(&omap_dma_driver);
+			rc = PTR_ERR(pdev);
+		}
+	}
+	return rc;
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 0000000..869534c
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,24 @@
+/*
+ * OMAP DMA Engine support
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OMAP_DMA_H
+#define __LINUX_OMAP_DMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+bool omap_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
+{
+	return false;
+}
+#endif
+
+#endif
-- 
1.7.4.4
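
For anyone wanting to try this from a client driver: channels exported by
this driver are obtained through the standard dmaengine filter mechanism,
with omap_dma_filter_fn() matching on the OMAP DMA request line number.
A minimal sketch, not part of the patch - example_request_omap_chan() and
dma_req_line are made-up names, and the caller must supply the
peripheral's request line number:

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

static struct dma_chan *example_request_omap_chan(unsigned int dma_req_line)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* omap_dma_filter_fn() matches a channel on its request line number */
	return dma_request_channel(mask, omap_dma_filter_fn, &dma_req_line);
}

Transfers prepared on such a channel are programmed per scatterlist entry
as EN elements of ES bytes per frame and FN frames; for example, with a
4-byte bus width and a maxburst of 16, a 4096-byte entry becomes EN=16,
FN=64 (4 * 16 * 64 = 4096).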


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 02/11] mmc: omap_hsmmc: add DMA engine support
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:06       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:06 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Chris Ball, linux-mmc

Add DMA engine support to the OMAP HSMMC driver.  This supplements the
private DMA API implementation contained within this driver; the driver
can be switched at build time between the DMA engine API and the
private DMA API.
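
The DMA engine path this adds boils down to a slave config pointing both
directions at the MMC data register, followed by prep/submit/issue, as
the hunks below show.  As a condensed sketch only - example_hsmmc_submit_dma()
is a made-up name, host/chan/data are the driver's own, and the scatterlist
is assumed to have been mapped already by omap_hsmmc_pre_dma_transfer():

static int example_hsmmc_submit_dma(struct omap_hsmmc_host *host,
				    struct dma_chan *chan,
				    struct mmc_data *data)
{
	struct dma_slave_config cfg = {
		.src_addr	= host->mapbase + OMAP_HSMMC_DATA,
		.dst_addr	= host->mapbase + OMAP_HSMMC_DATA,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= data->blksz / 4,
		.dst_maxburst	= data->blksz / 4,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
			data->flags & MMC_DATA_WRITE ?
				DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	tx->callback = omap_hsmmc_dma_callback;	/* runs on completion */
	tx->callback_param = host;

	dmaengine_submit(tx);		/* does not fail for slave channels */
	dma_async_issue_pending(chan);	/* actually start the queued transfer */

	return 0;
}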

Tested-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap_hsmmc.c |  192 +++++++++++++++++++++++++++++++++++------
 1 files changed, 165 insertions(+), 27 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9a7a60a..f80361f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
+#include <linux/dmaengine.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -167,7 +168,9 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch;
+	int			use_dma, dma_ch, dma2;
+	struct dma_chan		*tx_chan;
+	struct dma_chan		*rx_chan;
 	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			response_busy;
@@ -802,19 +805,26 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
 		return DMA_FROM_DEVICE;
 }
 
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+	struct mmc_data *data)
+{
+	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-	int dma_ch;
+	int dma_ch, dma2;
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->irq_lock, flags);
 	host->req_in_progress = 0;
 	dma_ch = host->dma_ch;
+	dma2 = host->dma2;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_hsmmc_disable_irq(host);
 	/* Do not complete the request if DMA is still in progress */
-	if (mrq->data && host->use_dma && dma_ch != -1)
+	if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
 		return;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
@@ -886,7 +896,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-	int dma_ch;
+	int dma_ch, dma2;
 	unsigned long flags;
 
 	host->data->error = errno;
@@ -894,8 +904,20 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 	spin_lock_irqsave(&host->irq_lock, flags);
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
+	dma2 = host->dma2;
+	host->dma2 = -1;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
+	if (host->use_dma && dma2 != -1) {
+		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+		dmaengine_terminate_all(chan);
+		dma_unmap_sg(chan->device->dev,
+			host->data->sg, host->data->sg_len,
+			omap_hsmmc_get_dma_dir(host, host->data));
+
+		host->data->host_cookie = 0;
+	}
 	if (host->use_dma && dma_ch != -1) {
 		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
 			host->data->sg_len,
@@ -1292,9 +1314,43 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
 	}
 }
 
+static void omap_hsmmc_dma_callback(void *param)
+{
+	struct omap_hsmmc_host *host = param;
+	struct dma_chan *chan;
+	struct mmc_data *data;
+	int req_in_progress;
+
+	spin_lock_irq(&host->irq_lock);
+	if (host->dma2 < 0) {
+		spin_unlock_irq(&host->irq_lock);
+		return;
+	}
+
+	data = host->mrq->data;
+	chan = omap_hsmmc_get_dma_chan(host, data);
+	if (!data->host_cookie)
+		dma_unmap_sg(chan->device->dev,
+			     data->sg, data->sg_len,
+			     omap_hsmmc_get_dma_dir(host, data));
+
+	req_in_progress = host->req_in_progress;
+	host->dma2 = -1;
+	spin_unlock_irq(&host->irq_lock);
+
+	/* If DMA has finished after TC, complete the request */
+	if (!req_in_progress) {
+		struct mmc_request *mrq = host->mrq;
+
+		host->mrq = NULL;
+		mmc_request_done(host->mmc, mrq);
+	}
+}
+
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 				       struct mmc_data *data,
-				       struct omap_hsmmc_next *next)
+				       struct omap_hsmmc_next *next,
+				       struct device *dev)
 {
 	int dma_len;
 
@@ -1309,8 +1365,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next ||
 	    (!next && data->host_cookie != host->next_data.cookie)) {
-		dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len,
+		dma_len = dma_map_sg(dev, data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 
 	} else {
@@ -1339,6 +1394,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 {
 	int dma_ch = 0, ret = 0, i;
 	struct mmc_data *data = req->data;
+	struct dma_chan *chan;
 
 	/* Sanity check: all the SG entries must be aligned by block size. */
 	for (i = 0; i < data->sg_len; i++) {
@@ -1354,24 +1410,66 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 		 */
 		return -EINVAL;
 
-	BUG_ON(host->dma_ch != -1);
+	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
 
-	ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-			       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-	if (ret != 0) {
-		dev_err(mmc_dev(host->mmc),
-			"%s: omap_request_dma() failed with %d\n",
-			mmc_hostname(host->mmc), ret);
-		return ret;
-	}
-	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
-	if (ret)
-		return ret;
+	chan = omap_hsmmc_get_dma_chan(host, data);
+	if (!chan) {
+		ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+				       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
+		if (ret != 0) {
+			dev_err(mmc_dev(host->mmc),
+				"%s: omap_request_dma() failed with %d\n",
+				mmc_hostname(host->mmc), ret);
+			return ret;
+		}
+		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+						  mmc_dev(host->mmc));
+		if (ret)
+			return ret;
+
+		host->dma_ch = dma_ch;
+		host->dma_sg_idx = 0;
+
+		omap_hsmmc_config_dma_params(host, data, data->sg);
+	} else {
+		struct dma_slave_config cfg;
+		struct dma_async_tx_descriptor *tx;
+
+		cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+		cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		cfg.src_maxburst = data->blksz / 4;
+		cfg.dst_maxburst = data->blksz / 4;
+
+		ret = dmaengine_slave_config(chan, &cfg);
+		if (ret)
+			return ret;
+
+		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
+						  chan->device->dev);
+		if (ret)
+			return ret;
+
+		tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx) {
+			dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+			/* FIXME: cleanup */
+			return -1;
+		}
 
-	host->dma_ch = dma_ch;
-	host->dma_sg_idx = 0;
+		tx->callback = omap_hsmmc_dma_callback;
+		tx->callback_param = host;
 
-	omap_hsmmc_config_dma_params(host, data, data->sg);
+		/* Does not fail */
+		dmaengine_submit(tx);
+
+		host->dma2 = 1;
+
+		dma_async_issue_pending(chan);
+	}
 
 	return 0;
 }
@@ -1454,9 +1552,12 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct mmc_data *data = mrq->data;
 
 	if (host->use_dma) {
+		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
 		if (data->host_cookie)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len,
+			dma_unmap_sg(dev,
+				     data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
@@ -1472,10 +1573,14 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 		return ;
 	}
 
-	if (host->use_dma)
+	if (host->use_dma) {
+		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
+
 		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-						&host->next_data))
+						&host->next_data, dev))
 			mrq->data->host_cookie = 0;
+	}
 }
 
 /*
@@ -1487,7 +1592,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
 	int err;
 
 	BUG_ON(host->req_in_progress);
-	BUG_ON(host->dma_ch != -1);
+	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
 	if (host->protect_card) {
 		if (host->reqs_blocked < 3) {
 			/*
@@ -1854,6 +1959,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->use_dma	= 1;
 	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
+	host->dma2	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
 	host->mapbase	= res->start + pdata->reg_offset;
@@ -1951,6 +2057,29 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	}
 	host->dma_line_rx = res->start;
 
+	{
+		dma_cap_mask_t mask;
+		unsigned sig;
+		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+#if 1
+		sig = host->dma_line_rx;
+		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+		if (!host->rx_chan) {
+			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
+		}
+#endif
+#if 1
+		sig = host->dma_line_tx;
+		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+		if (!host->tx_chan) {
+			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
+		}
+#endif
+	}
+
 	/* Request IRQ for MMC operations */
 	ret = request_irq(host->irq, omap_hsmmc_irq, 0,
 			mmc_hostname(mmc), host);
@@ -2028,6 +2157,10 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 err_irq_cd_init:
 	free_irq(host->irq, host);
 err_irq:
+	if (host->tx_chan)
+		dma_release_channel(host->tx_chan);
+	if (host->rx_chan)
+		dma_release_channel(host->rx_chan);
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
@@ -2063,6 +2196,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
 	if (mmc_slot(host).card_detect_irq)
 		free_irq(mmc_slot(host).card_detect_irq, host);
 
+	if (host->tx_chan)
+		dma_release_channel(host->tx_chan);
+	if (host->rx_chan)
+		dma_release_channel(host->rx_chan);
+
 	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:07       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:07 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Chris Ball, linux-mmc

Remove the private DMA API implementation from omap_hsmmc, so that it
uses the DMA engine API exclusively.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap_hsmmc.c |  265 ++++++++++-------------------------------
 1 files changed, 64 insertions(+), 201 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f80361f..9504092 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -38,7 +38,6 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
-#include <plat/dma.h>
 #include <mach/hardware.h>
 #include <plat/board.h>
 #include <plat/mmc.h>
@@ -168,10 +167,9 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch, dma2;
+	int			use_dma, dma_ch;
 	struct dma_chan		*tx_chan;
 	struct dma_chan		*rx_chan;
-	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			response_busy;
 	int			context_loss;
@@ -813,18 +811,17 @@ static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
 
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->irq_lock, flags);
 	host->req_in_progress = 0;
 	dma_ch = host->dma_ch;
-	dma2 = host->dma2;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_hsmmc_disable_irq(host);
 	/* Do not complete the request if DMA is still in progress */
-	if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
+	if (mrq->data && host->use_dma && dma_ch != -1)
 		return;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
@@ -896,7 +893,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	host->data->error = errno;
@@ -904,11 +901,9 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 	spin_lock_irqsave(&host->irq_lock, flags);
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	dma2 = host->dma2;
-	host->dma2 = -1;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
-	if (host->use_dma && dma2 != -1) {
+	if (host->use_dma && dma_ch != -1) {
 		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
 
 		dmaengine_terminate_all(chan);
@@ -918,13 +913,6 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 
 		host->data->host_cookie = 0;
 	}
-	if (host->use_dma && dma_ch != -1) {
-		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
-			host->data->sg_len,
-			omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(dma_ch);
-		host->data->host_cookie = 0;
-	}
 	host->data = NULL;
 }
 
@@ -1220,100 +1208,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
-				     struct mmc_data *data)
-{
-	int sync_dev;
-
-	if (data->flags & MMC_DATA_WRITE)
-		sync_dev = host->dma_line_tx;
-	else
-		sync_dev = host->dma_line_rx;
-	return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
-				       struct mmc_data *data,
-				       struct scatterlist *sgl)
-{
-	int blksz, nblk, dma_ch;
-
-	dma_ch = host->dma_ch;
-	if (data->flags & MMC_DATA_WRITE) {
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	} else {
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	}
-
-	blksz = host->data->blksz;
-	nblk = sg_dma_len(sgl) / blksz;
-
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
-			omap_hsmmc_get_dma_sync_dev(host, data),
-			!(data->flags & MMC_DATA_WRITE));
-
-	omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
-{
-	struct omap_hsmmc_host *host = cb_data;
-	struct mmc_data *data;
-	int dma_ch, req_in_progress;
-	unsigned long flags;
-
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
-			ch_status);
-		return;
-	}
-
-	spin_lock_irqsave(&host->irq_lock, flags);
-	if (host->dma_ch < 0) {
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	data = host->mrq->data;
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, data,
-					   data->sg + host->dma_sg_idx);
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	if (!data->host_cookie)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
-
-	req_in_progress = host->req_in_progress;
-	dma_ch = host->dma_ch;
-	host->dma_ch = -1;
-	spin_unlock_irqrestore(&host->irq_lock, flags);
-
-	omap_free_dma(dma_ch);
-
-	/* If DMA has finished after TC, complete the request */
-	if (!req_in_progress) {
-		struct mmc_request *mrq = host->mrq;
-
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, mrq);
-	}
-}
-
 static void omap_hsmmc_dma_callback(void *param)
 {
 	struct omap_hsmmc_host *host = param;
@@ -1322,7 +1216,7 @@ static void omap_hsmmc_dma_callback(void *param)
 	int req_in_progress;
 
 	spin_lock_irq(&host->irq_lock);
-	if (host->dma2 < 0) {
+	if (host->dma_ch < 0) {
 		spin_unlock_irq(&host->irq_lock);
 		return;
 	}
@@ -1335,7 +1229,7 @@ static void omap_hsmmc_dma_callback(void *param)
 			     omap_hsmmc_get_dma_dir(host, data));
 
 	req_in_progress = host->req_in_progress;
-	host->dma2 = -1;
+	host->dma_ch = -1;
 	spin_unlock_irq(&host->irq_lock);
 
 	/* If DMA has finished after TC, complete the request */
@@ -1350,7 +1244,7 @@ static void omap_hsmmc_dma_callback(void *param)
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 				       struct mmc_data *data,
 				       struct omap_hsmmc_next *next,
-				       struct device *dev)
+				       struct dma_chan *chan)
 {
 	int dma_len;
 
@@ -1365,7 +1259,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next ||
 	    (!next && data->host_cookie != host->next_data.cookie)) {
-		dma_len = dma_map_sg(dev, data->sg, data->sg_len,
+		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 
 	} else {
@@ -1392,7 +1286,9 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
-	int dma_ch = 0, ret = 0, i;
+	struct dma_slave_config cfg;
+	struct dma_async_tx_descriptor *tx;
+	int ret = 0, i;
 	struct mmc_data *data = req->data;
 	struct dma_chan *chan;
 
@@ -1410,66 +1306,43 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 		 */
 		return -EINVAL;
 
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 
 	chan = omap_hsmmc_get_dma_chan(host, data);
-	if (!chan) {
-		ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-				       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-		if (ret != 0) {
-			dev_err(mmc_dev(host->mmc),
-				"%s: omap_request_dma() failed with %d\n",
-				mmc_hostname(host->mmc), ret);
-			return ret;
-		}
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  mmc_dev(host->mmc));
-		if (ret)
-			return ret;
-
-		host->dma_ch = dma_ch;
-		host->dma_sg_idx = 0;
 
-		omap_hsmmc_config_dma_params(host, data, data->sg);
-	} else {
-		struct dma_slave_config cfg;
-		struct dma_async_tx_descriptor *tx;
+	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = data->blksz / 4;
+	cfg.dst_maxburst = data->blksz / 4;
 
-		cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.src_maxburst = data->blksz / 4;
-		cfg.dst_maxburst = data->blksz / 4;
-
-		ret = dmaengine_slave_config(chan, &cfg);
-		if (ret)
-			return ret;
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret)
+		return ret;
 
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  chan->device->dev);
-		if (ret)
-			return ret;
+	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
+	if (ret)
+		return ret;
 
-		tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
-			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!tx) {
-			dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
-			/* FIXME: cleanup */
-			return -1;
-		}
+	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx) {
+		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+		/* FIXME: cleanup */
+		return -1;
+	}
 
-		tx->callback = omap_hsmmc_dma_callback;
-		tx->callback_param = host;
+	tx->callback = omap_hsmmc_dma_callback;
+	tx->callback_param = host;
 
-		/* Does not fail */
-		dmaengine_submit(tx);
+	/* Does not fail */
+	dmaengine_submit(tx);
 
-		host->dma2 = 1;
+	host->dma_ch = 1;
 
-		dma_async_issue_pending(chan);
-	}
+	dma_async_issue_pending(chan);
 
 	return 0;
 }
@@ -1551,14 +1424,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma) {
+	if (host->use_dma && data->host_cookie) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
-		if (data->host_cookie)
-			dma_unmap_sg(dev,
-				     data->sg, data->sg_len,
-				     omap_hsmmc_get_dma_dir(host, data));
+		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+			     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }
@@ -1575,10 +1445,9 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 
 	if (host->use_dma) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
 		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-						&host->next_data, dev))
+						&host->next_data, c))
 			mrq->data->host_cookie = 0;
 	}
 }
@@ -1592,7 +1461,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
 	int err;
 
 	BUG_ON(host->req_in_progress);
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 	if (host->protect_card) {
 		if (host->reqs_blocked < 3) {
 			/*
@@ -1905,6 +1774,8 @@ static inline struct omap_mmc_platform_data
 }
 #endif
 
+extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
 static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
@@ -1913,6 +1784,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret, irq;
 	const struct of_device_id *match;
+	dma_cap_mask_t mask;
+	unsigned tx_req, rx_req;
 
 	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
 	if (match) {
@@ -1957,9 +1830,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
 	host->use_dma	= 1;
-	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
-	host->dma2	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
 	host->mapbase	= res->start + pdata->reg_offset;
@@ -2048,36 +1919,28 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_tx = res->start;
+	tx_req = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
 	if (!res) {
 		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_rx = res->start;
+	rx_req = res->start;
 
-	{
-		dma_cap_mask_t mask;
-		unsigned sig;
-		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-#if 1
-		sig = host->dma_line_rx;
-		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->rx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
-		}
-#endif
-#if 1
-		sig = host->dma_line_tx;
-		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->tx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
-		}
-#endif
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+	if (!host->rx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		goto err_irq;
+	}
+
+	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+	if (!host->tx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		goto err_irq;
 	}
 
 	/* Request IRQ for MMC operations */
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread
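
The hunks above funnel all data transfers through a dma_slave_config describing the controller's 32-bit DATA register.  For reference, that setup reduces to the usual dmaengine slave-configuration pattern; the sketch below is illustrative only (example_mmc_slave_config, fifo_phys and blksz are made-up names, not taken from omap_hsmmc):

#include <linux/dmaengine.h>
#include <linux/string.h>

/*
 * Sketch only: configure a slave channel for a 32-bit MMC data FIFO.
 * fifo_phys is the physical address of the controller's DATA register,
 * blksz the MMC block size; both are illustrative parameters.
 */
static int example_mmc_slave_config(struct dma_chan *chan,
                                    dma_addr_t fifo_phys, unsigned int blksz)
{
        struct dma_slave_config cfg;

        memset(&cfg, 0, sizeof(cfg));
        /* The same FIFO address is used for reads and writes */
        cfg.src_addr = fifo_phys;
        cfg.dst_addr = fifo_phys;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        /* Burst one block's worth of 32-bit words per DMA request */
        cfg.src_maxburst = blksz / 4;
        cfg.dst_maxburst = blksz / 4;

        return dmaengine_slave_config(chan, &cfg);
}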

* [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
@ 2012-06-07 11:07       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:07 UTC (permalink / raw)
  To: linux-arm-kernel

Remove the private DMA API implementation from omap_hsmmc, making it
use entirely the DMA engine API.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap_hsmmc.c |  265 ++++++++++-------------------------------
 1 files changed, 64 insertions(+), 201 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f80361f..9504092 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -38,7 +38,6 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
-#include <plat/dma.h>
 #include <mach/hardware.h>
 #include <plat/board.h>
 #include <plat/mmc.h>
@@ -168,10 +167,9 @@ struct omap_hsmmc_host {
 	u32			bytesleft;
 	int			suspended;
 	int			irq;
-	int			use_dma, dma_ch, dma2;
+	int			use_dma, dma_ch;
 	struct dma_chan		*tx_chan;
 	struct dma_chan		*rx_chan;
-	int			dma_line_tx, dma_line_rx;
 	int			slot_id;
 	int			response_busy;
 	int			context_loss;
@@ -813,18 +811,17 @@ static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
 
 static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->irq_lock, flags);
 	host->req_in_progress = 0;
 	dma_ch = host->dma_ch;
-	dma2 = host->dma2;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
 	omap_hsmmc_disable_irq(host);
 	/* Do not complete the request if DMA is still in progress */
-	if (mrq->data && host->use_dma && (dma_ch != -1 || dma2 != -1))
+	if (mrq->data && host->use_dma && dma_ch != -1)
 		return;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
@@ -896,7 +893,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
  */
 static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 {
-	int dma_ch, dma2;
+	int dma_ch;
 	unsigned long flags;
 
 	host->data->error = errno;
@@ -904,11 +901,9 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 	spin_lock_irqsave(&host->irq_lock, flags);
 	dma_ch = host->dma_ch;
 	host->dma_ch = -1;
-	dma2 = host->dma2;
-	host->dma2 = -1;
 	spin_unlock_irqrestore(&host->irq_lock, flags);
 
-	if (host->use_dma && dma2 != -1) {
+	if (host->use_dma && dma_ch != -1) {
 		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
 
 		dmaengine_terminate_all(chan);
@@ -918,13 +913,6 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 
 		host->data->host_cookie = 0;
 	}
-	if (host->use_dma && dma_ch != -1) {
-		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
-			host->data->sg_len,
-			omap_hsmmc_get_dma_dir(host, host->data));
-		omap_free_dma(dma_ch);
-		host->data->host_cookie = 0;
-	}
 	host->data = NULL;
 }
 
@@ -1220,100 +1208,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
-				     struct mmc_data *data)
-{
-	int sync_dev;
-
-	if (data->flags & MMC_DATA_WRITE)
-		sync_dev = host->dma_line_tx;
-	else
-		sync_dev = host->dma_line_rx;
-	return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
-				       struct mmc_data *data,
-				       struct scatterlist *sgl)
-{
-	int blksz, nblk, dma_ch;
-
-	dma_ch = host->dma_ch;
-	if (data->flags & MMC_DATA_WRITE) {
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	} else {
-		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-			(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
-		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-			sg_dma_address(sgl), 0, 0);
-	}
-
-	blksz = host->data->blksz;
-	nblk = sg_dma_len(sgl) / blksz;
-
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
-			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
-			omap_hsmmc_get_dma_sync_dev(host, data),
-			!(data->flags & MMC_DATA_WRITE));
-
-	omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
-{
-	struct omap_hsmmc_host *host = cb_data;
-	struct mmc_data *data;
-	int dma_ch, req_in_progress;
-	unsigned long flags;
-
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
-			ch_status);
-		return;
-	}
-
-	spin_lock_irqsave(&host->irq_lock, flags);
-	if (host->dma_ch < 0) {
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	data = host->mrq->data;
-	host->dma_sg_idx++;
-	if (host->dma_sg_idx < host->dma_len) {
-		/* Fire up the next transfer. */
-		omap_hsmmc_config_dma_params(host, data,
-					   data->sg + host->dma_sg_idx);
-		spin_unlock_irqrestore(&host->irq_lock, flags);
-		return;
-	}
-
-	if (!data->host_cookie)
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
-
-	req_in_progress = host->req_in_progress;
-	dma_ch = host->dma_ch;
-	host->dma_ch = -1;
-	spin_unlock_irqrestore(&host->irq_lock, flags);
-
-	omap_free_dma(dma_ch);
-
-	/* If DMA has finished after TC, complete the request */
-	if (!req_in_progress) {
-		struct mmc_request *mrq = host->mrq;
-
-		host->mrq = NULL;
-		mmc_request_done(host->mmc, mrq);
-	}
-}
-
 static void omap_hsmmc_dma_callback(void *param)
 {
 	struct omap_hsmmc_host *host = param;
@@ -1322,7 +1216,7 @@ static void omap_hsmmc_dma_callback(void *param)
 	int req_in_progress;
 
 	spin_lock_irq(&host->irq_lock);
-	if (host->dma2 < 0) {
+	if (host->dma_ch < 0) {
 		spin_unlock_irq(&host->irq_lock);
 		return;
 	}
@@ -1335,7 +1229,7 @@ static void omap_hsmmc_dma_callback(void *param)
 			     omap_hsmmc_get_dma_dir(host, data));
 
 	req_in_progress = host->req_in_progress;
-	host->dma2 = -1;
+	host->dma_ch = -1;
 	spin_unlock_irq(&host->irq_lock);
 
 	/* If DMA has finished after TC, complete the request */
@@ -1350,7 +1244,7 @@ static void omap_hsmmc_dma_callback(void *param)
 static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 				       struct mmc_data *data,
 				       struct omap_hsmmc_next *next,
-				       struct device *dev)
+				       struct dma_chan *chan)
 {
 	int dma_len;
 
@@ -1365,7 +1259,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 	/* Check if next job is already prepared */
 	if (next ||
 	    (!next && data->host_cookie != host->next_data.cookie)) {
-		dma_len = dma_map_sg(dev, data->sg, data->sg_len,
+		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
 				     omap_hsmmc_get_dma_dir(host, data));
 
 	} else {
@@ -1392,7 +1286,9 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
 static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 					struct mmc_request *req)
 {
-	int dma_ch = 0, ret = 0, i;
+	struct dma_slave_config cfg;
+	struct dma_async_tx_descriptor *tx;
+	int ret = 0, i;
 	struct mmc_data *data = req->data;
 	struct dma_chan *chan;
 
@@ -1410,66 +1306,43 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
 		 */
 		return -EINVAL;
 
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 
 	chan = omap_hsmmc_get_dma_chan(host, data);
-	if (!chan) {
-		ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
-				       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
-		if (ret != 0) {
-			dev_err(mmc_dev(host->mmc),
-				"%s: omap_request_dma() failed with %d\n",
-				mmc_hostname(host->mmc), ret);
-			return ret;
-		}
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  mmc_dev(host->mmc));
-		if (ret)
-			return ret;
-
-		host->dma_ch = dma_ch;
-		host->dma_sg_idx = 0;
 
-		omap_hsmmc_config_dma_params(host, data, data->sg);
-	} else {
-		struct dma_slave_config cfg;
-		struct dma_async_tx_descriptor *tx;
+	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = data->blksz / 4;
+	cfg.dst_maxburst = data->blksz / 4;
 
-		cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
-		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		cfg.src_maxburst = data->blksz / 4;
-		cfg.dst_maxburst = data->blksz / 4;
-
-		ret = dmaengine_slave_config(chan, &cfg);
-		if (ret)
-			return ret;
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret)
+		return ret;
 
-		ret = omap_hsmmc_pre_dma_transfer(host, data, NULL,
-						  chan->device->dev);
-		if (ret)
-			return ret;
+	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
+	if (ret)
+		return ret;
 
-		tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
-			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!tx) {
-			dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
-			/* FIXME: cleanup */
-			return -1;
-		}
+	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx) {
+		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+		/* FIXME: cleanup */
+		return -1;
+	}
 
-		tx->callback = omap_hsmmc_dma_callback;
-		tx->callback_param = host;
+	tx->callback = omap_hsmmc_dma_callback;
+	tx->callback_param = host;
 
-		/* Does not fail */
-		dmaengine_submit(tx);
+	/* Does not fail */
+	dmaengine_submit(tx);
 
-		host->dma2 = 1;
+	host->dma_ch = 1;
 
-		dma_async_issue_pending(chan);
-	}
+	dma_async_issue_pending(chan);
 
 	return 0;
 }
@@ -1551,14 +1424,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->use_dma) {
+	if (host->use_dma && data->host_cookie) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
-		if (data->host_cookie)
-			dma_unmap_sg(dev,
-				     data->sg, data->sg_len,
-				     omap_hsmmc_get_dma_dir(host, data));
+		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+			     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }
@@ -1575,10 +1445,9 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 
 	if (host->use_dma) {
 		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
-		struct device *dev = c ? c->device->dev : mmc_dev(mmc);
 
 		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
-						&host->next_data, dev))
+						&host->next_data, c))
 			mrq->data->host_cookie = 0;
 	}
 }
@@ -1592,7 +1461,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
 	int err;
 
 	BUG_ON(host->req_in_progress);
-	BUG_ON(host->dma_ch != -1 || host->dma2 != -1);
+	BUG_ON(host->dma_ch != -1);
 	if (host->protect_card) {
 		if (host->reqs_blocked < 3) {
 			/*
@@ -1905,6 +1774,8 @@ static inline struct omap_mmc_platform_data
 }
 #endif
 
+extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
+
 static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
@@ -1913,6 +1784,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret, irq;
 	const struct of_device_id *match;
+	dma_cap_mask_t mask;
+	unsigned tx_req, rx_req;
 
 	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
 	if (match) {
@@ -1957,9 +1830,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->pdata	= pdata;
 	host->dev	= &pdev->dev;
 	host->use_dma	= 1;
-	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch	= -1;
-	host->dma2	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
 	host->mapbase	= res->start + pdata->reg_offset;
@@ -2048,36 +1919,28 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_tx = res->start;
+	tx_req = res->start;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
 	if (!res) {
 		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
 		goto err_irq;
 	}
-	host->dma_line_rx = res->start;
+	rx_req = res->start;
 
-	{
-		dma_cap_mask_t mask;
-		unsigned sig;
-		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-#if 1
-		sig = host->dma_line_rx;
-		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->rx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
-		}
-#endif
-#if 1
-		sig = host->dma_line_tx;
-		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-		if (!host->tx_chan) {
-			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
-		}
-#endif
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+	if (!host->rx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		goto err_irq;
+	}
+
+	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+	if (!host->tx_chan) {
+		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		goto err_irq;
 	}
 
 	/* Request IRQ for MMC operations */
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread
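
The patch above moves omap_hsmmc onto the standard dmaengine submission sequence: map the scatterlist against the channel's device, prepare a slave descriptor, attach a completion callback, then submit and issue.  A minimal sketch of that sequence follows; example_start_sg_transfer and example_dma_complete are illustrative placeholders, not omap_hsmmc functions:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative completion callback; a real driver completes the MMC request here */
static void example_dma_complete(void *param)
{
        pr_debug("DMA transfer finished for %p\n", param);
}

/*
 * Sketch only: queue one slave scatterlist transfer on 'chan'.
 * 'to_device' selects a write (MEM_TO_DEV) versus a read (DEV_TO_MEM).
 */
static int example_start_sg_transfer(struct dma_chan *chan,
                                     struct scatterlist *sg, unsigned int sg_len,
                                     bool to_device, void *ctx)
{
        struct dma_async_tx_descriptor *tx;
        enum dma_data_direction map_dir =
                to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        int nents;

        /* Map against the DMA engine's device, not the MMC host's */
        nents = dma_map_sg(chan->device->dev, sg, sg_len, map_dir);
        if (!nents)
                return -ENOMEM;

        tx = dmaengine_prep_slave_sg(chan, sg, nents,
                        to_device ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                dma_unmap_sg(chan->device->dev, sg, sg_len, map_dir);
                return -EIO;
        }

        tx->callback = example_dma_complete;
        tx->callback_param = ctx;

        dmaengine_submit(tx);           /* does not fail for slave channels */
        dma_async_issue_pending(chan);  /* actually start the queued transfer */

        return 0;
}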

* [CFT 04/11] mmc: omap: add DMA engine support
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:07       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:07 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Jarkko Lavinen, Chris Ball, linux-mmc

Add DMA engine support to the OMAP driver.  This supplements the
private DMA API implementation contained within this driver, and the
driver can be switched at build time between the DMA engine API and
the private DMA API.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap.c       |  199 ++++++++++++++++++++++++++++++++++++++--
 drivers/mmc/host/omap_hsmmc.c |    3 +-
 2 files changed, 190 insertions(+), 12 deletions(-)

diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 552196c..eaea251 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
+#include <linux/omap-dma.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/clk.h>
@@ -99,6 +101,8 @@
 
 struct mmc_omap_host;
 
+#define USE_DMA_PRIVATE
+
 struct mmc_omap_slot {
 	int			id;
 	unsigned int		vdd;
@@ -128,6 +132,10 @@ struct mmc_omap_host {
 	unsigned char		id; /* 16xx chips have 2 MMC blocks */
 	struct clk *		iclk;
 	struct clk *		fclk;
+	struct dma_chan		*dma_rx;
+	u32			dma_rx_burst;
+	struct dma_chan		*dma_tx;
+	u32			dma_tx_burst;
 	struct resource		*mem_res;
 	void __iomem		*virt_base;
 	unsigned int		phys_base;
@@ -153,12 +161,14 @@ struct mmc_omap_host {
 
 	unsigned		use_dma:1;
 	unsigned		brs_received:1, dma_done:1;
-	unsigned		dma_is_read:1;
 	unsigned		dma_in_use:1;
+#ifdef USE_DMA_PRIVATE
+	unsigned		dma_is_read:1;
 	int			dma_ch;
-	spinlock_t		dma_lock;
 	struct timer_list	dma_timer;
 	unsigned		dma_len;
+#endif
+	spinlock_t		dma_lock;
 
 	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
 	struct mmc_omap_slot    *current_slot;
@@ -406,18 +416,32 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
 		     int abort)
 {
 	enum dma_data_direction dma_data_dir;
+	struct device *dev = mmc_dev(host->mmc);
+	struct dma_chan *c;
 
+#ifdef USE_DMA_PRIVATE
 	BUG_ON(host->dma_ch < 0);
 	if (data->error)
 		omap_stop_dma(host->dma_ch);
 	/* Release DMA channel lazily */
 	mod_timer(&host->dma_timer, jiffies + HZ);
-	if (data->flags & MMC_DATA_WRITE)
+#endif
+	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
-	else
+		c = host->dma_tx;
+	} else {
 		dma_data_dir = DMA_FROM_DEVICE;
-	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
-		     dma_data_dir);
+		c = host->dma_rx;
+	}
+	if (c) {
+		if (data->error) {
+			dmaengine_terminate_all(c);
+			/* Claim nothing transferred on error... */
+			data->bytes_xfered = 0;
+		}
+		dev = c->device->dev;
+	}
+	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
 }
 
 static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -524,6 +548,7 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
 		mmc_omap_xfer_done(host, data);
 }
 
+#ifdef USE_DMA_PRIVATE
 static void
 mmc_omap_dma_timer(unsigned long data)
 {
@@ -533,6 +558,7 @@ mmc_omap_dma_timer(unsigned long data)
 	omap_free_dma(host->dma_ch);
 	host->dma_ch = -1;
 }
+#endif
 
 static void
 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
@@ -891,6 +917,18 @@ static void mmc_omap_cover_handler(unsigned long param)
 		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
 }
 
+static void mmc_omap_dma_callback(void *priv)
+{
+	struct mmc_omap_host *host = priv;
+	struct mmc_data *data = host->data;
+
+	/* If we got to the end of DMA, assume everything went well */
+	data->bytes_xfered += data->blocks * data->blksz;
+
+	mmc_omap_dma_done(host, data);
+}
+
+#ifdef USE_DMA_PRIVATE
 /* Prepare to transfer the next segment of a scatterlist */
 static void
 mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
@@ -1045,6 +1083,7 @@ static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data
 
 	return 0;
 }
+#endif
 
 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
 {
@@ -1118,6 +1157,80 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 
 	host->sg_idx = 0;
 	if (use_dma) {
+		enum dma_data_direction dma_data_dir;
+		struct dma_async_tx_descriptor *tx;
+		struct dma_chan *c;
+		u32 burst, *bp;
+		u16 buf;
+
+		/*
+		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+		 * and 24xx. Use 16 or 32 word frames when the
+		 * blocksize is at least that large. Blocksize is
+		 * usually 512 bytes; but not for some SD reads.
+		 */
+		burst = cpu_is_omap15xx() ? 32 : 64;
+		if (burst > data->blksz)
+			burst = data->blksz;
+
+		burst >>= 1;
+
+		if (data->flags & MMC_DATA_WRITE) {
+			c = host->dma_tx;
+			bp = &host->dma_tx_burst;
+			buf = 0x0f80 | (burst - 1) << 0;
+			dma_data_dir = DMA_TO_DEVICE;
+		} else {
+			c = host->dma_rx;
+			bp = &host->dma_rx_burst;
+			buf = 0x800f | (burst - 1) << 8;
+			dma_data_dir = DMA_FROM_DEVICE;
+		}
+
+		if (!c)
+			goto use_pio;
+
+		/* Only reconfigure if we have a different burst size */
+		if (*bp != burst) {
+			struct dma_slave_config cfg;
+
+			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+			cfg.src_maxburst = burst;
+			cfg.dst_maxburst = burst;
+
+			if (dmaengine_slave_config(c, &cfg))
+				goto use_pio;
+
+			*bp = burst;
+		}
+
+		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+					  dma_data_dir);
+		if (host->sg_len == 0)
+			goto use_pio;
+
+		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx)
+			goto use_pio;
+
+		OMAP_MMC_WRITE(host, BUF, buf);
+
+		tx->callback = mmc_omap_dma_callback;
+		tx->callback_param = host;
+		dmaengine_submit(tx);
+		host->brs_received = 0;
+		host->dma_done = 0;
+		host->dma_in_use = 1;
+		return;
+	}
+ use_pio:
+#ifdef USE_DMA_PRIVATE
+	if (use_dma) {
 		if (mmc_omap_get_dma_channel(host, data) == 0) {
 			enum dma_data_direction dma_data_dir;
 
@@ -1136,6 +1249,9 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 		} else
 			use_dma = 0;
 	}
+#else
+	use_dma = 0;
+#endif
 
 	/* Revert to PIO? */
 	if (!use_dma) {
@@ -1157,8 +1273,17 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
 	/* only touch fifo AFTER the controller readies it */
 	mmc_omap_prepare_data(host, req);
 	mmc_omap_start_command(host, req->cmd);
-	if (host->dma_in_use)
-		omap_start_dma(host->dma_ch);
+	if (host->dma_in_use) {
+		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+				host->dma_tx : host->dma_rx;
+
+		if (c)
+			dma_async_issue_pending(c);
+#ifdef USE_DMA_PRIVATE
+		else
+			omap_start_dma(host->dma_ch);
+#endif
+	}
 }
 
 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1525,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_omap_host *host = NULL;
 	struct resource *res;
+	dma_cap_mask_t mask;
+	unsigned sig;
 	int i, ret = 0;
 	int irq;
 
@@ -1439,7 +1566,9 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
 
 	spin_lock_init(&host->dma_lock);
+#ifdef USE_DMA_PRIVATE
 	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
+#endif
 	spin_lock_init(&host->slot_lock);
 	init_waitqueue_head(&host->slot_wq);
 
@@ -1452,8 +1581,10 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	host->irq = irq;
 
 	host->use_dma = 1;
+#ifdef USE_DMA_PRIVATE
 	host->dev->dma_mask = &pdata->dma_mask;
 	host->dma_ch = -1;
+#endif
 
 	host->irq = irq;
 	host->phys_base = host->mem_res->start;
@@ -1474,9 +1605,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 		goto err_free_iclk;
 	}
 
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->dma_tx_burst = -1;
+	host->dma_rx_burst = -1;
+
+	if (cpu_is_omap24xx())
+		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
+	else
+		sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
+	host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+	if (!host->dma_tx) {
+		dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
+			sig);
+		goto err_dma;
+	}
+#else
+	if (!host->dma_tx)
+		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+			sig);
+#endif
+	if (cpu_is_omap24xx())
+		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
+	else
+		sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
+	host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+	if (!host->dma_rx) {
+		dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
+			sig);
+		goto err_dma;
+	}
+#else
+	if (!host->dma_rx)
+		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+			sig);
+#endif
+
 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
 	if (ret)
-		goto err_free_fclk;
+		goto err_free_dma;
 
 	if (pdata->init != NULL) {
 		ret = pdata->init(&pdev->dev);
@@ -1508,7 +1678,11 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 		pdata->cleanup(&pdev->dev);
 err_free_irq:
 	free_irq(host->irq, host);
-err_free_fclk:
+err_free_dma:
+	if (host->dma_tx)
+		dma_release_channel(host->dma_tx);
+	if (host->dma_rx)
+		dma_release_channel(host->dma_rx);
 	clk_put(host->fclk);
 err_free_iclk:
 	clk_disable(host->iclk);
@@ -1543,6 +1717,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
 	clk_disable(host->iclk);
 	clk_put(host->iclk);
 
+	if (host->dma_tx)
+		dma_release_channel(host->dma_tx);
+	if (host->dma_rx)
+		dma_release_channel(host->dma_rx);
+
 	iounmap(host->virt_base);
 	release_mem_region(pdev->resource[0].start,
 			   pdev->resource[0].end - pdev->resource[0].start + 1);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9504092..5d7dbc9 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -30,6 +30,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
+#include <linux/omap-dma.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/mmc.h>
@@ -1774,8 +1775,6 @@ static inline struct omap_mmc_platform_data
 }
 #endif
 
-extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
-
 static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread
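
At probe time the patch above resolves the per-direction DMA request lines and asks the dmaengine core for matching channels through omap_dma_filter_fn, falling back to the private API or PIO when a channel cannot be obtained.  Roughly, the allocation/teardown pattern is the sketch below (example_request_channels and the rx_sig/tx_sig parameters are illustrative; a real driver takes the request lines from its platform resources):

#include <linux/errno.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/*
 * Sketch only: grab one RX and one TX slave channel for the given
 * OMAP DMA request lines, releasing the RX channel if TX fails.
 */
static int example_request_channels(unsigned int rx_sig, unsigned int tx_sig,
                                    struct dma_chan **rx, struct dma_chan **tx)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* The filter is called synchronously, so &rx_sig/&tx_sig may live on the stack */
        *rx = dma_request_channel(mask, omap_dma_filter_fn, &rx_sig);
        if (!*rx)
                return -ENODEV;

        *tx = dma_request_channel(mask, omap_dma_filter_fn, &tx_sig);
        if (!*tx) {
                dma_release_channel(*rx);
                *rx = NULL;
                return -ENODEV;
        }

        return 0;
}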

* [CFT 05/11] mmc: omap: remove private DMA API implementation
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:07       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:07 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Jarkko Lavinen, Chris Ball, linux-mmc

Remove the private DMA API implementation from the omap driver, making
it use the DMA engine API exclusively.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap.c |  235 +---------------------------------------------
 1 files changed, 6 insertions(+), 229 deletions(-)

diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index eaea251..4026392 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -101,8 +101,6 @@
 
 struct mmc_omap_host;
 
-#define USE_DMA_PRIVATE
-
 struct mmc_omap_slot {
 	int			id;
 	unsigned int		vdd;
@@ -162,12 +160,6 @@ struct mmc_omap_host {
 	unsigned		use_dma:1;
 	unsigned		brs_received:1, dma_done:1;
 	unsigned		dma_in_use:1;
-#ifdef USE_DMA_PRIVATE
-	unsigned		dma_is_read:1;
-	int			dma_ch;
-	struct timer_list	dma_timer;
-	unsigned		dma_len;
-#endif
 	spinlock_t		dma_lock;
 
 	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
@@ -419,13 +411,6 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
 	struct device *dev = mmc_dev(host->mmc);
 	struct dma_chan *c;
 
-#ifdef USE_DMA_PRIVATE
-	BUG_ON(host->dma_ch < 0);
-	if (data->error)
-		omap_stop_dma(host->dma_ch);
-	/* Release DMA channel lazily */
-	mod_timer(&host->dma_timer, jiffies + HZ);
-#endif
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
 		c = host->dma_tx;
@@ -548,18 +533,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
 		mmc_omap_xfer_done(host, data);
 }
 
-#ifdef USE_DMA_PRIVATE
-static void
-mmc_omap_dma_timer(unsigned long data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
-	BUG_ON(host->dma_ch < 0);
-	omap_free_dma(host->dma_ch);
-	host->dma_ch = -1;
-}
-#endif
-
 static void
 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
 {
@@ -928,163 +901,6 @@ static void mmc_omap_dma_callback(void *priv)
 	mmc_omap_dma_done(host, data);
 }
 
-#ifdef USE_DMA_PRIVATE
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
-{
-	int dma_ch = host->dma_ch;
-	unsigned long data_addr;
-	u16 buf, frame;
-	u32 count;
-	struct scatterlist *sg = &data->sg[host->sg_idx];
-	int src_port = 0;
-	int dst_port = 0;
-	int sync_dev = 0;
-
-	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-	frame = data->blksz;
-	count = sg_dma_len(sg);
-
-	if ((data->blocks == 1) && (count > data->blksz))
-		count = frame;
-
-	host->dma_len = count;
-
-	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
-	 * Use 16 or 32 word frames when the blocksize is at least that large.
-	 * Blocksize is usually 512 bytes; but not for some SD reads.
-	 */
-	if (cpu_is_omap15xx() && frame > 32)
-		frame = 32;
-	else if (frame > 64)
-		frame = 64;
-	count /= frame;
-	frame >>= 1;
-
-	if (!(data->flags & MMC_DATA_WRITE)) {
-		buf = 0x800f | ((frame - 1) << 8);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_TIPB;
-			dst_port = OMAP_DMA_PORT_EMIFF;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_RX;
-
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_CONSTANT,
-					data_addr, 0, 0);
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_POST_INC,
-					 sg_dma_address(sg), 0, 0);
-		omap_set_dma_dest_data_pack(dma_ch, 1);
-		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	} else {
-		buf = 0x0f80 | ((frame - 1) << 0);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_EMIFF;
-			dst_port = OMAP_DMA_PORT_TIPB;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_TX;
-
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_CONSTANT,
-					 data_addr, 0, 0);
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_POST_INC,
-					sg_dma_address(sg), 0, 0);
-		omap_set_dma_src_data_pack(dma_ch, 1);
-		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	}
-
-	/* Max limit for DMA frame count is 0xffff */
-	BUG_ON(count > 0xffff);
-
-	OMAP_MMC_WRITE(host, BUF, buf);
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
-				     frame, count, OMAP_DMA_SYNC_FRAME,
-				     sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-	struct mmc_data *mmcdat = host->data;
-
-	if (unlikely(host->dma_ch < 0)) {
-		dev_err(mmc_dev(host->mmc),
-			"DMA callback while DMA not enabled\n");
-		return;
-	}
-	/* FIXME: We really should do something to _handle_ the errors */
-	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
-		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
-		return;
-	}
-	if (ch_status & OMAP_DMA_DROP_IRQ) {
-		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
-		return;
-	}
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		return;
-	}
-	mmcdat->bytes_xfered += host->dma_len;
-	host->sg_idx++;
-	if (host->sg_idx < host->sg_len) {
-		mmc_omap_prepare_dma(host, host->data);
-		omap_start_dma(host->dma_ch);
-	} else
-		mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
-	const char *dma_dev_name;
-	int sync_dev, dma_ch, is_read, r;
-
-	is_read = !(data->flags & MMC_DATA_WRITE);
-	del_timer_sync(&host->dma_timer);
-	if (host->dma_ch >= 0) {
-		if (is_read == host->dma_is_read)
-			return 0;
-		omap_free_dma(host->dma_ch);
-		host->dma_ch = -1;
-	}
-
-	if (is_read) {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_RX;
-			dma_dev_name = "MMC1 read";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_RX;
-			dma_dev_name = "MMC2 read";
-		}
-	} else {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_TX;
-			dma_dev_name = "MMC1 write";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_TX;
-			dma_dev_name = "MMC2 write";
-		}
-	}
-	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
-			     host, &dma_ch);
-	if (r != 0) {
-		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
-		return r;
-	}
-	host->dma_ch = dma_ch;
-	host->dma_is_read = is_read;
-
-	return 0;
-}
-#endif
-
 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
 {
 	u16 reg;
@@ -1229,38 +1045,13 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 		return;
 	}
  use_pio:
-#ifdef USE_DMA_PRIVATE
-	if (use_dma) {
-		if (mmc_omap_get_dma_channel(host, data) == 0) {
-			enum dma_data_direction dma_data_dir;
-
-			if (data->flags & MMC_DATA_WRITE)
-				dma_data_dir = DMA_TO_DEVICE;
-			else
-				dma_data_dir = DMA_FROM_DEVICE;
-
-			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-						sg_len, dma_data_dir);
-			host->total_bytes_left = 0;
-			mmc_omap_prepare_dma(host, req->data);
-			host->brs_received = 0;
-			host->dma_done = 0;
-			host->dma_in_use = 1;
-		} else
-			use_dma = 0;
-	}
-#else
-	use_dma = 0;
-#endif
 
 	/* Revert to PIO? */
-	if (!use_dma) {
-		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
-		host->total_bytes_left = data->blocks * block_size;
-		host->sg_len = sg_len;
-		mmc_omap_sg_to_buf(host);
-		host->dma_in_use = 0;
-	}
+	OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+	host->total_bytes_left = data->blocks * block_size;
+	host->sg_len = sg_len;
+	mmc_omap_sg_to_buf(host);
+	host->dma_in_use = 0;
 }
 
 static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1277,12 +1068,7 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
 		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
 				host->dma_tx : host->dma_rx;
 
-		if (c)
-			dma_async_issue_pending(c);
-#ifdef USE_DMA_PRIVATE
-		else
-			omap_start_dma(host->dma_ch);
-#endif
+		dma_async_issue_pending(c);
 	}
 }
 
@@ -1566,9 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
 
 	spin_lock_init(&host->dma_lock);
-#ifdef USE_DMA_PRIVATE
-	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
-#endif
 	spin_lock_init(&host->slot_lock);
 	init_waitqueue_head(&host->slot_wq);
 
@@ -1579,13 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	host->id = pdev->id;
 	host->mem_res = res;
 	host->irq = irq;
-
 	host->use_dma = 1;
-#ifdef USE_DMA_PRIVATE
-	host->dev->dma_mask = &pdata->dma_mask;
-	host->dma_ch = -1;
-#endif
-
 	host->irq = irq;
 	host->phys_base = host->mem_res->start;
 	host->virt_base = ioremap(res->start, resource_size(res));
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread
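
With the private API gone, the error path in mmc_omap_release_dma() reduces to the dmaengine form: terminate the channel, discard the byte count, and unmap the scatterlist against the channel's device.  A rough sketch of that teardown step (the helper name and its arguments are illustrative):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: tear down an in-flight slave transfer after an error.
 * Terminating the channel drops any queued descriptors; the scatterlist
 * must still be unmapped against the DMA engine's struct device.
 */
static void example_abort_sg_transfer(struct dma_chan *chan,
                                      struct scatterlist *sg, unsigned int sg_len,
                                      enum dma_data_direction dir)
{
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, sg, sg_len, dir);
}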

* [CFT 05/11] mmc: omap: remove private DMA API implementation
@ 2012-06-07 11:07       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:07 UTC (permalink / raw)
  To: linux-arm-kernel

Remove the private DMA API implementation from the omap driver, making
it use the DMA engine API exclusively.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mmc/host/omap.c |  235 +---------------------------------------------
 1 files changed, 6 insertions(+), 229 deletions(-)

diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index eaea251..4026392 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -101,8 +101,6 @@
 
 struct mmc_omap_host;
 
-#define USE_DMA_PRIVATE
-
 struct mmc_omap_slot {
 	int			id;
 	unsigned int		vdd;
@@ -162,12 +160,6 @@ struct mmc_omap_host {
 	unsigned		use_dma:1;
 	unsigned		brs_received:1, dma_done:1;
 	unsigned		dma_in_use:1;
-#ifdef USE_DMA_PRIVATE
-	unsigned		dma_is_read:1;
-	int			dma_ch;
-	struct timer_list	dma_timer;
-	unsigned		dma_len;
-#endif
 	spinlock_t		dma_lock;
 
 	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
@@ -419,13 +411,6 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
 	struct device *dev = mmc_dev(host->mmc);
 	struct dma_chan *c;
 
-#ifdef USE_DMA_PRIVATE
-	BUG_ON(host->dma_ch < 0);
-	if (data->error)
-		omap_stop_dma(host->dma_ch);
-	/* Release DMA channel lazily */
-	mod_timer(&host->dma_timer, jiffies + HZ);
-#endif
 	if (data->flags & MMC_DATA_WRITE) {
 		dma_data_dir = DMA_TO_DEVICE;
 		c = host->dma_tx;
@@ -548,18 +533,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
 		mmc_omap_xfer_done(host, data);
 }
 
-#ifdef USE_DMA_PRIVATE
-static void
-mmc_omap_dma_timer(unsigned long data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
-	BUG_ON(host->dma_ch < 0);
-	omap_free_dma(host->dma_ch);
-	host->dma_ch = -1;
-}
-#endif
-
 static void
 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
 {
@@ -928,163 +901,6 @@ static void mmc_omap_dma_callback(void *priv)
 	mmc_omap_dma_done(host, data);
 }
 
-#ifdef USE_DMA_PRIVATE
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
-{
-	int dma_ch = host->dma_ch;
-	unsigned long data_addr;
-	u16 buf, frame;
-	u32 count;
-	struct scatterlist *sg = &data->sg[host->sg_idx];
-	int src_port = 0;
-	int dst_port = 0;
-	int sync_dev = 0;
-
-	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
-	frame = data->blksz;
-	count = sg_dma_len(sg);
-
-	if ((data->blocks == 1) && (count > data->blksz))
-		count = frame;
-
-	host->dma_len = count;
-
-	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
-	 * Use 16 or 32 word frames when the blocksize is at least that large.
-	 * Blocksize is usually 512 bytes; but not for some SD reads.
-	 */
-	if (cpu_is_omap15xx() && frame > 32)
-		frame = 32;
-	else if (frame > 64)
-		frame = 64;
-	count /= frame;
-	frame >>= 1;
-
-	if (!(data->flags & MMC_DATA_WRITE)) {
-		buf = 0x800f | ((frame - 1) << 8);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_TIPB;
-			dst_port = OMAP_DMA_PORT_EMIFF;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_RX;
-
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_CONSTANT,
-					data_addr, 0, 0);
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_POST_INC,
-					 sg_dma_address(sg), 0, 0);
-		omap_set_dma_dest_data_pack(dma_ch, 1);
-		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	} else {
-		buf = 0x0f80 | ((frame - 1) << 0);
-
-		if (cpu_class_is_omap1()) {
-			src_port = OMAP_DMA_PORT_EMIFF;
-			dst_port = OMAP_DMA_PORT_TIPB;
-		}
-		if (cpu_is_omap24xx())
-			sync_dev = OMAP24XX_DMA_MMC1_TX;
-
-		omap_set_dma_dest_params(dma_ch, dst_port,
-					 OMAP_DMA_AMODE_CONSTANT,
-					 data_addr, 0, 0);
-		omap_set_dma_src_params(dma_ch, src_port,
-					OMAP_DMA_AMODE_POST_INC,
-					sg_dma_address(sg), 0, 0);
-		omap_set_dma_src_data_pack(dma_ch, 1);
-		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
-	}
-
-	/* Max limit for DMA frame count is 0xffff */
-	BUG_ON(count > 0xffff);
-
-	OMAP_MMC_WRITE(host, BUF, buf);
-	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
-				     frame, count, OMAP_DMA_SYNC_FRAME,
-				     sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
-{
-	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-	struct mmc_data *mmcdat = host->data;
-
-	if (unlikely(host->dma_ch < 0)) {
-		dev_err(mmc_dev(host->mmc),
-			"DMA callback while DMA not enabled\n");
-		return;
-	}
-	/* FIXME: We really should do something to _handle_ the errors */
-	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
-		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
-		return;
-	}
-	if (ch_status & OMAP_DMA_DROP_IRQ) {
-		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
-		return;
-	}
-	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
-		return;
-	}
-	mmcdat->bytes_xfered += host->dma_len;
-	host->sg_idx++;
-	if (host->sg_idx < host->sg_len) {
-		mmc_omap_prepare_dma(host, host->data);
-		omap_start_dma(host->dma_ch);
-	} else
-		mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
-	const char *dma_dev_name;
-	int sync_dev, dma_ch, is_read, r;
-
-	is_read = !(data->flags & MMC_DATA_WRITE);
-	del_timer_sync(&host->dma_timer);
-	if (host->dma_ch >= 0) {
-		if (is_read == host->dma_is_read)
-			return 0;
-		omap_free_dma(host->dma_ch);
-		host->dma_ch = -1;
-	}
-
-	if (is_read) {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_RX;
-			dma_dev_name = "MMC1 read";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_RX;
-			dma_dev_name = "MMC2 read";
-		}
-	} else {
-		if (host->id == 0) {
-			sync_dev = OMAP_DMA_MMC_TX;
-			dma_dev_name = "MMC1 write";
-		} else {
-			sync_dev = OMAP_DMA_MMC2_TX;
-			dma_dev_name = "MMC2 write";
-		}
-	}
-	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
-			     host, &dma_ch);
-	if (r != 0) {
-		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
-		return r;
-	}
-	host->dma_ch = dma_ch;
-	host->dma_is_read = is_read;
-
-	return 0;
-}
-#endif
-
 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
 {
 	u16 reg;
@@ -1229,38 +1045,13 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
 		return;
 	}
  use_pio:
-#ifdef USE_DMA_PRIVATE
-	if (use_dma) {
-		if (mmc_omap_get_dma_channel(host, data) == 0) {
-			enum dma_data_direction dma_data_dir;
-
-			if (data->flags & MMC_DATA_WRITE)
-				dma_data_dir = DMA_TO_DEVICE;
-			else
-				dma_data_dir = DMA_FROM_DEVICE;
-
-			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
-						sg_len, dma_data_dir);
-			host->total_bytes_left = 0;
-			mmc_omap_prepare_dma(host, req->data);
-			host->brs_received = 0;
-			host->dma_done = 0;
-			host->dma_in_use = 1;
-		} else
-			use_dma = 0;
-	}
-#else
-	use_dma = 0;
-#endif
 
 	/* Revert to PIO? */
-	if (!use_dma) {
-		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
-		host->total_bytes_left = data->blocks * block_size;
-		host->sg_len = sg_len;
-		mmc_omap_sg_to_buf(host);
-		host->dma_in_use = 0;
-	}
+	OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+	host->total_bytes_left = data->blocks * block_size;
+	host->sg_len = sg_len;
+	mmc_omap_sg_to_buf(host);
+	host->dma_in_use = 0;
 }
 
 static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1277,12 +1068,7 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
 		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
 				host->dma_tx : host->dma_rx;
 
-		if (c)
-			dma_async_issue_pending(c);
-#ifdef USE_DMA_PRIVATE
-		else
-			omap_start_dma(host->dma_ch);
-#endif
+		dma_async_issue_pending(c);
 	}
 }
 
@@ -1566,9 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
 
 	spin_lock_init(&host->dma_lock);
-#ifdef USE_DMA_PRIVATE
-	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
-#endif
 	spin_lock_init(&host->slot_lock);
 	init_waitqueue_head(&host->slot_wq);
 
@@ -1579,13 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
 	host->id = pdev->id;
 	host->mem_res = res;
 	host->irq = irq;
-
 	host->use_dma = 1;
-#ifdef USE_DMA_PRIVATE
-	host->dev->dma_mask = &pdata->dma_mask;
-	host->dma_ch = -1;
-#endif
-
 	host->irq = irq;
 	host->phys_base = host->mem_res->start;
 	host->virt_base = ioremap(res->start, resource_size(res));
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 06/11] ARM: omap: remove mmc platform data dma_mask and initialization
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:08       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:08 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Tony Lindgren

DMAengine uses the DMA engine's device structure when mapping and
unmapping memory for DMA, so the MMC devices do not need their DMA
masks initialized (this reflects the hardware: the MMC device is not
the device performing the DMA).
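
As a purely illustrative sketch of why the mask is unused (not code from
this series; the helper name is invented), a dmaengine client maps its
buffers against the DMA controller's struct device, so it is that
device's dma_mask which gets consulted:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch: dma_map_sg() is handed chan->device->dev, i.e. the DMA
 * controller that actually masters the bus, so the MMC platform
 * device's dma_mask never comes into play.
 */
static int map_for_dmaengine(struct dma_chan *chan, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	return dma_map_sg(chan->device->dev, sg, nents, dir);
}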

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mach-omap1/board-h2-mmc.c    |    1 -
 arch/arm/mach-omap1/board-h3-mmc.c    |    1 -
 arch/arm/mach-omap1/board-nokia770.c  |    1 -
 arch/arm/mach-omap2/board-n8x0.c      |    1 -
 arch/arm/mach-omap2/hsmmc.c           |    1 -
 arch/arm/plat-omap/include/plat/mmc.h |    2 --
 6 files changed, 0 insertions(+), 7 deletions(-)

diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index da0e37d..e1362ce 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots                       = 1,
 	.init				= mmc_late_init,
 	.cleanup			= mmc_cleanup,
-	.dma_mask			= 0xffffffff,
 	.slots[0]       = {
 		.set_power              = mmc_set_power,
 		.ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index f8242aa..c74daac 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
  */
 static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots                       = 1,
-	.dma_mask			= 0xffffffff,
 	.slots[0]       = {
 		.set_power              = mmc_set_power,
 		.ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 7212ae9..c54b45f 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
 
 static struct omap_mmc_platform_data nokia770_mmc2_data = {
 	.nr_slots                       = 1,
-	.dma_mask			= 0xffffffff,
 	.max_freq                       = 12000000,
 	.slots[0]       = {
 		.set_power		= nokia770_mmc_set_power,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 8ca14e8..6d70e81 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -470,7 +470,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.cleanup			= n8x0_mmc_cleanup,
 	.shutdown			= n8x0_mmc_shutdown,
 	.max_freq			= 24000000,
-	.dma_mask			= 0xffffffff,
 	.slots[0] = {
 		.wires			= 4,
 		.set_power		= n8x0_mmc_set_power,
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be697d4..a9675d8 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 	mmc->slots[0].caps = c->caps;
 	mmc->slots[0].pm_caps = c->pm_caps;
 	mmc->slots[0].internal_clock = !c->ext_clock;
-	mmc->dma_mask = 0xffffffff;
 	mmc->max_freq = c->max_freq;
 	if (cpu_is_omap44xx())
 		mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index a7754a8..a76f423 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
 	/* Return context loss count due to PM states changing */
 	int (*get_context_loss_count)(struct device *dev);
 
-	u64 dma_mask;
-
 	/* Integrating attributes from the omap_hwmod layer */
 	u8 controller_flags;
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:08       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:08 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Grant Likely, spi-devel-general

Add DMA engine support to the OMAP SPI driver.  This supplements the
private DMA API implementation contained within this driver; the
transmit and receive sides can be switched independently at build time
between the DMA engine API and the private DMA API.
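
For reference, the DMA engine side of that switch requests a slave
channel by the same synchronisation line the private API used.  A rough
sketch (the helper name is invented; sync_dev would come from the
platform DMA resources, e.g. mcspi_dma->dma_rx_sync_dev below):

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/* Sketch: grab a slave channel wired to one OMAP DMA request line. */
static struct dma_chan *get_omap_slave_chan(unsigned int sync_dev)
{
	dma_cap_mask_t mask;
	unsigned int sig = sync_dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* omap_dma_filter_fn() matches the channel for this request line */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}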

Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/spi/spi-omap2-mcspi.c |  183 +++++++++++++++++++++++++++++++++-------
 1 files changed, 151 insertions(+), 32 deletions(-)

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe..ca016df 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -20,6 +20,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  */
+#define USE_DMA_ENGINE_RX
+#define USE_DMA_ENGINE_TX
 
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -28,6 +30,8 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/clk.h>
@@ -93,6 +97,8 @@
 
 /* We have 2 DMA channels per CS, one for RX and one for TX */
 struct omap2_mcspi_dma {
+	struct dma_chan *dma_tx;
+	struct dma_chan *dma_rx;
 	int dma_tx_channel;
 	int dma_rx_channel;
 
@@ -300,6 +306,30 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
 	return 0;
 }
 
+static void omap2_mcspi_rx_callback(void *data)
+{
+	struct spi_device *spi = data;
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+	complete(&mcspi_dma->dma_rx_completion);
+
+	/* We must disable the DMA RX request */
+	omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+	struct spi_device *spi = data;
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+	complete(&mcspi_dma->dma_tx_completion);
+
+	/* We must disable the DMA TX request */
+	omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
 static unsigned
 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 {
@@ -314,6 +344,9 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	u8			* rx;
 	const u8		* tx;
 	void __iomem		*chstat_reg;
+	struct dma_slave_config	cfg;
+	enum dma_slave_buswidth width;
+	unsigned es;
 
 	mcspi = spi_master_get_devdata(spi->master);
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,6 +354,71 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
 	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
 
+	if (cs->word_len <= 8) {
+		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		es = 1;
+	} else if (cs->word_len <= 16) {
+		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		es = 2;
+	} else {
+		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		es = 4;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+	cfg.src_addr_width = width;
+	cfg.dst_addr_width = width;
+	cfg.src_maxburst = 1;
+	cfg.dst_maxburst = 1;
+
+	if (xfer->tx_buf && mcspi_dma->dma_tx) {
+		struct dma_async_tx_descriptor *tx;
+		struct scatterlist sg;
+
+		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+		sg_init_table(&sg, 1);
+		sg_dma_address(&sg) = xfer->tx_dma;
+		sg_dma_len(&sg) = xfer->len;
+
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (tx) {
+			tx->callback = omap2_mcspi_tx_callback;
+			tx->callback_param = spi;
+			dmaengine_submit(tx);
+		} else {
+			/* FIXME: fall back to PIO? */
+		}
+	}
+
+	if (xfer->rx_buf && mcspi_dma->dma_rx) {
+		struct dma_async_tx_descriptor *tx;
+		struct scatterlist sg;
+		size_t len = xfer->len - es;
+
+		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+		if (l & OMAP2_MCSPI_CHCONF_TURBO)
+			len -= es;
+
+		sg_init_table(&sg, 1);
+		sg_dma_address(&sg) = xfer->rx_dma;
+		sg_dma_len(&sg) = len;
+
+		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (tx) {
+			tx->callback = omap2_mcspi_rx_callback;
+			tx->callback_param = spi;
+			dmaengine_submit(tx);
+		} else {
+			/* FIXME: fall back to PIO? */
+		}
+	}
+
 	count = xfer->len;
 	c = count;
 	word_len = cs->word_len;
@@ -342,7 +440,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 		element_count = count >> 2;
 	}
 
-	if (tx != NULL) {
+	if (tx != NULL && mcspi_dma->dma_tx_channel != -1) {
 		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
 				data_type, element_count, 1,
 				OMAP_DMA_SYNC_ELEMENT,
@@ -357,7 +455,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 				xfer->tx_dma, 0, 0);
 	}
 
-	if (rx != NULL) {
+	if (rx != NULL && mcspi_dma->dma_rx_channel != -1) {
 		elements = element_count - 1;
 		if (l & OMAP2_MCSPI_CHCONF_TURBO)
 			elements--;
@@ -377,12 +475,18 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	}
 
 	if (tx != NULL) {
-		omap_start_dma(mcspi_dma->dma_tx_channel);
+		if (mcspi_dma->dma_tx)
+			dma_async_issue_pending(mcspi_dma->dma_tx);
+		else
+			omap_start_dma(mcspi_dma->dma_tx_channel);
 		omap2_mcspi_set_dma_req(spi, 0, 1);
 	}
 
 	if (rx != NULL) {
-		omap_start_dma(mcspi_dma->dma_rx_channel);
+		if (mcspi_dma->dma_rx)
+			dma_async_issue_pending(mcspi_dma->dma_rx);
+		else
+			omap_start_dma(mcspi_dma->dma_rx_channel);
 		omap2_mcspi_set_dma_req(spi, 1, 1);
 	}
 
@@ -406,7 +510,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
 		omap2_mcspi_set_enable(spi, 0);
 
+		elements = element_count - 1;
+
 		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+			elements--;
 
 			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
@@ -725,32 +832,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
 
 static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
 {
-	struct spi_device	*spi = data;
-	struct omap2_mcspi	*mcspi;
-	struct omap2_mcspi_dma	*mcspi_dma;
-
-	mcspi = spi_master_get_devdata(spi->master);
-	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
-	complete(&mcspi_dma->dma_rx_completion);
-
-	/* We must disable the DMA RX request */
-	omap2_mcspi_set_dma_req(spi, 1, 0);
+	omap2_mcspi_rx_callback(data);
 }
 
 static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
 {
-	struct spi_device	*spi = data;
-	struct omap2_mcspi	*mcspi;
-	struct omap2_mcspi_dma	*mcspi_dma;
-
-	mcspi = spi_master_get_devdata(spi->master);
-	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
-	complete(&mcspi_dma->dma_tx_completion);
-
-	/* We must disable the DMA TX request */
-	omap2_mcspi_set_dma_req(spi, 0, 0);
+	omap2_mcspi_tx_callback(data);
 }
 
 static int omap2_mcspi_request_dma(struct spi_device *spi)
@@ -758,17 +845,43 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
 	struct spi_master	*master = spi->master;
 	struct omap2_mcspi	*mcspi;
 	struct omap2_mcspi_dma	*mcspi_dma;
+	dma_cap_mask_t mask;
+	unsigned sig;
 
 	mcspi = spi_master_get_devdata(master);
 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
 
+	init_completion(&mcspi_dma->dma_rx_completion);
+	init_completion(&mcspi_dma->dma_tx_completion);
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+#ifdef USE_DMA_ENGINE_RX
+	sig = mcspi_dma->dma_rx_sync_dev;
+	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+	if (!mcspi_dma->dma_rx) {
+		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
+		return -EAGAIN;
+	}
+#else
 	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
 			omap2_mcspi_dma_rx_callback, spi,
 			&mcspi_dma->dma_rx_channel)) {
 		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
 		return -EAGAIN;
 	}
+#endif
 
+#ifdef USE_DMA_ENGINE_TX
+	sig = mcspi_dma->dma_tx_sync_dev;
+	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+	if (!mcspi_dma->dma_tx) {
+		dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
+		dma_release_channel(mcspi_dma->dma_rx);
+		mcspi_dma->dma_rx = NULL;
+		return -EAGAIN;
+	}
+#else
 	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
 			omap2_mcspi_dma_tx_callback, spi,
 			&mcspi_dma->dma_tx_channel)) {
@@ -777,9 +890,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
 		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
 		return -EAGAIN;
 	}
-
-	init_completion(&mcspi_dma->dma_rx_completion);
-	init_completion(&mcspi_dma->dma_tx_completion);
+#endif
 
 	return 0;
 }
@@ -812,8 +923,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 		list_add_tail(&cs->node, &ctx->cs);
 	}
 
-	if (mcspi_dma->dma_rx_channel == -1
-			|| mcspi_dma->dma_tx_channel == -1) {
+	if ((!mcspi_dma->dma_rx && mcspi_dma->dma_rx_channel == -1) ||
+	    (!mcspi_dma->dma_tx && mcspi_dma->dma_tx_channel == -1)) {
 		ret = omap2_mcspi_request_dma(spi);
 		if (ret < 0)
 			return ret;
@@ -847,6 +958,14 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 	if (spi->chip_select < spi->master->num_chipselect) {
 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
+		if (mcspi_dma->dma_rx) {
+			dma_release_channel(mcspi_dma->dma_rx);
+			mcspi_dma->dma_rx = NULL;
+		}
+		if (mcspi_dma->dma_tx) {
+			dma_release_channel(mcspi_dma->dma_tx);
+			mcspi_dma->dma_tx = NULL;
+		}
 		if (mcspi_dma->dma_rx_channel != -1) {
 			omap_free_dma(mcspi_dma->dma_rx_channel);
 			mcspi_dma->dma_rx_channel = -1;
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 08/11] spi: omap2-mcspi: remove private DMA API implementation
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:08       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:08 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Grant Likely, spi-devel-general

Remove the private DMA API implementation from spi-omap2-mcspi.c,
making it use the DMA engine API exclusively.
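
With the omap_set_dma_*_params() calls gone, the per-transfer addressing
is carried entirely by the dma_slave_config programmed before each
descriptor is prepared.  A minimal sketch of that shape, reusing the
word-length mapping the driver already has (illustrative only, not part
of the patch; the helper name is invented):

#include <linux/dmaengine.h>

/* Sketch: FIFO address and element width, formerly omap_set_dma_*_params(). */
static int config_mcspi_slave(struct dma_chan *chan, dma_addr_t fifo_reg,
			      unsigned int word_len, bool is_tx)
{
	struct dma_slave_config cfg = { 0 };
	enum dma_slave_buswidth width;

	if (word_len <= 8)
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (word_len <= 16)
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	if (is_tx) {
		cfg.dst_addr = fifo_reg;	/* constant-address TX register */
		cfg.dst_addr_width = width;
		cfg.dst_maxburst = 1;
	} else {
		cfg.src_addr = fifo_reg;	/* constant-address RX register */
		cfg.src_addr_width = width;
		cfg.src_maxburst = 1;
	}

	return dmaengine_slave_config(chan, &cfg);
}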

Acked-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/spi/spi-omap2-mcspi.c |  104 ++---------------------------------------
 1 files changed, 5 insertions(+), 99 deletions(-)

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index ca016df..9d3409a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -20,8 +20,6 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  */
-#define USE_DMA_ENGINE_RX
-#define USE_DMA_ENGINE_TX
 
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -43,7 +41,6 @@
 
 #include <linux/spi/spi.h>
 
-#include <plat/dma.h>
 #include <plat/clock.h>
 #include <plat/mcspi.h>
 
@@ -99,8 +96,6 @@
 struct omap2_mcspi_dma {
 	struct dma_chan *dma_tx;
 	struct dma_chan *dma_rx;
-	int dma_tx_channel;
-	int dma_rx_channel;
 
 	int dma_tx_sync_dev;
 	int dma_rx_sync_dev;
@@ -336,9 +331,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	struct omap2_mcspi	*mcspi;
 	struct omap2_mcspi_cs	*cs = spi->controller_state;
 	struct omap2_mcspi_dma  *mcspi_dma;
-	unsigned int		count, c;
-	unsigned long		base, tx_reg, rx_reg;
-	int			word_len, data_type, element_count;
+	unsigned int		count;
+	int			word_len, element_count;
 	int			elements = 0;
 	u32			l;
 	u8			* rx;
@@ -420,73 +414,26 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	}
 
 	count = xfer->len;
-	c = count;
 	word_len = cs->word_len;
 
-	base = cs->phys;
-	tx_reg = base + OMAP2_MCSPI_TX0;
-	rx_reg = base + OMAP2_MCSPI_RX0;
 	rx = xfer->rx_buf;
 	tx = xfer->tx_buf;
 
 	if (word_len <= 8) {
-		data_type = OMAP_DMA_DATA_TYPE_S8;
 		element_count = count;
 	} else if (word_len <= 16) {
-		data_type = OMAP_DMA_DATA_TYPE_S16;
 		element_count = count >> 1;
 	} else /* word_len <= 32 */ {
-		data_type = OMAP_DMA_DATA_TYPE_S32;
 		element_count = count >> 2;
 	}
 
-	if (tx != NULL && mcspi_dma->dma_tx_channel != -1) {
-		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
-				data_type, element_count, 1,
-				OMAP_DMA_SYNC_ELEMENT,
-				mcspi_dma->dma_tx_sync_dev, 0);
-
-		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
-				OMAP_DMA_AMODE_CONSTANT,
-				tx_reg, 0, 0);
-
-		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
-				OMAP_DMA_AMODE_POST_INC,
-				xfer->tx_dma, 0, 0);
-	}
-
-	if (rx != NULL && mcspi_dma->dma_rx_channel != -1) {
-		elements = element_count - 1;
-		if (l & OMAP2_MCSPI_CHCONF_TURBO)
-			elements--;
-
-		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
-				data_type, elements, 1,
-				OMAP_DMA_SYNC_ELEMENT,
-				mcspi_dma->dma_rx_sync_dev, 1);
-
-		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
-				OMAP_DMA_AMODE_CONSTANT,
-				rx_reg, 0, 0);
-
-		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
-				OMAP_DMA_AMODE_POST_INC,
-				xfer->rx_dma, 0, 0);
-	}
-
 	if (tx != NULL) {
-		if (mcspi_dma->dma_tx)
-			dma_async_issue_pending(mcspi_dma->dma_tx);
-		else
-			omap_start_dma(mcspi_dma->dma_tx_channel);
+		dma_async_issue_pending(mcspi_dma->dma_tx);
 		omap2_mcspi_set_dma_req(spi, 0, 1);
 	}
 
 	if (rx != NULL) {
-		if (mcspi_dma->dma_rx)
-			dma_async_issue_pending(mcspi_dma->dma_rx);
-		else
-			omap_start_dma(mcspi_dma->dma_rx_channel);
+		dma_async_issue_pending(mcspi_dma->dma_rx);
 		omap2_mcspi_set_dma_req(spi, 1, 1);
 	}
 
@@ -830,16 +777,6 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
 	return 0;
 }
 
-static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
-{
-	omap2_mcspi_rx_callback(data);
-}
-
-static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
-	omap2_mcspi_tx_callback(data);
-}
-
 static int omap2_mcspi_request_dma(struct spi_device *spi)
 {
 	struct spi_master	*master = spi->master;
@@ -856,23 +793,13 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
-#ifdef USE_DMA_ENGINE_RX
 	sig = mcspi_dma->dma_rx_sync_dev;
 	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 	if (!mcspi_dma->dma_rx) {
 		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
 		return -EAGAIN;
 	}
-#else
-	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
-			omap2_mcspi_dma_rx_callback, spi,
-			&mcspi_dma->dma_rx_channel)) {
-		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
-		return -EAGAIN;
-	}
-#endif
 
-#ifdef USE_DMA_ENGINE_TX
 	sig = mcspi_dma->dma_tx_sync_dev;
 	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 	if (!mcspi_dma->dma_tx) {
@@ -881,16 +808,6 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
 		mcspi_dma->dma_rx = NULL;
 		return -EAGAIN;
 	}
-#else
-	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
-			omap2_mcspi_dma_tx_callback, spi,
-			&mcspi_dma->dma_tx_channel)) {
-		omap_free_dma(mcspi_dma->dma_rx_channel);
-		mcspi_dma->dma_rx_channel = -1;
-		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
-		return -EAGAIN;
-	}
-#endif
 
 	return 0;
 }
@@ -923,8 +840,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 		list_add_tail(&cs->node, &ctx->cs);
 	}
 
-	if ((!mcspi_dma->dma_rx && mcspi_dma->dma_rx_channel == -1) ||
-	    (!mcspi_dma->dma_tx && mcspi_dma->dma_tx_channel == -1)) {
+	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
 		ret = omap2_mcspi_request_dma(spi);
 		if (ret < 0)
 			return ret;
@@ -966,14 +882,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 			dma_release_channel(mcspi_dma->dma_tx);
 			mcspi_dma->dma_tx = NULL;
 		}
-		if (mcspi_dma->dma_rx_channel != -1) {
-			omap_free_dma(mcspi_dma->dma_rx_channel);
-			mcspi_dma->dma_rx_channel = -1;
-		}
-		if (mcspi_dma->dma_tx_channel != -1) {
-			omap_free_dma(mcspi_dma->dma_tx_channel);
-			mcspi_dma->dma_tx_channel = -1;
-		}
 	}
 }
 
@@ -1292,7 +1200,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
 			break;
 		}
 
-		mcspi->dma_channels[i].dma_rx_channel = -1;
 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
 		sprintf(dma_ch_name, "tx%d", i);
 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -1303,7 +1210,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
 			break;
 		}
 
-		mcspi->dma_channels[i].dma_tx_channel = -1;
 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
 	}
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 09/11] mtd: omap2: add DMA engine support
  2012-06-07 11:06     ` Russell King - ARM Linux
  (?)
@ 2012-06-07 11:09       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:09 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: David Woodhouse, linux-mtd

Add DMA engine support to the OMAP2 NAND driver.  This supplements the
private DMA API implementation contained within this driver, and the
driver can be switched at build time between the DMA engine API and
the private DMA API.
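
The DMA engine path pairs a slave transfer with the GPMC prefetch
engine.  Stripped of the prefetch setup and most error handling, its
shape is roughly the sketch below (illustrative only; the function
names are invented, the real code is in the diff):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static void nand_dma_done(void *data)
{
	complete(data);		/* wakes the waiter below */
}

/* Sketch: move one buffer to or from the NAND controller via dmaengine. */
static int nand_dma_xfer(struct dma_chan *chan, void *buf, unsigned int len,
			 bool is_write, struct completion *comp)
{
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	int ret = 0;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, dir))
		return -ENOMEM;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	init_completion(comp);
	tx->callback = nand_dma_done;
	tx->callback_param = comp;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* the real driver enables the GPMC prefetch engine at this point */
	wait_for_completion(comp);
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, dir);
	return ret;
}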

Tested-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mtd/nand/omap2.c |   92 +++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 91 insertions(+), 1 deletions(-)

diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d..2912d6c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -18,6 +19,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 
@@ -123,6 +125,7 @@ struct omap_nand_info {
 	int				gpmc_cs;
 	unsigned long			phys_base;
 	struct completion		comp;
+	struct dma_chan			*dma;
 	int				dma_ch;
 	int				gpmc_irq;
 	enum {
@@ -345,6 +348,10 @@ static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	complete((struct completion *) data);
 }
+static void omap_nand_dma_callback(void *data)
+{
+	complete((struct completion *) data);
+}
 
 /*
  * omap_nand_dma_transfer: configer and start dma transfer
@@ -382,6 +389,56 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
 	}
 
+	if (info->dma) {
+		struct dma_async_tx_descriptor *tx;
+		struct scatterlist sg;
+		unsigned n;
+
+		sg_init_one(&sg, addr, len);
+		n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+		if (n == 0) {
+			dev_err(&info->pdev->dev,
+				"Couldn't DMA map a %d byte buffer\n", len);
+			goto out_copy;
+		}
+
+		tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx) {
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+		tx->callback = omap_nand_dma_callback;
+		tx->callback_param = &info->comp;
+		dmaengine_submit(tx);
+
+		/*  configure and start prefetch transfer */
+		ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+		if (ret) {
+			/* PFPW engine is busy, use cpu copy method */
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+
+		init_completion(&info->comp);
+		dma_async_issue_pending(info->dma);
+
+		/* setup and start DMA using dma_addr */
+		wait_for_completion(&info->comp);
+		tim = 0;
+		limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
+		/* disable and stop the PFPW engine */
+		gpmc_prefetch_reset(info->gpmc_cs);
+
+		dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+		return 0;
+	}
+
 	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
 	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
 		dev_err(&info->pdev->dev,
@@ -414,7 +471,6 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		goto out_copy_unmap;
 
 	init_completion(&info->comp);
-
 	omap_start_dma(info->dma_ch);
 
 	/* setup and start DMA using dma_addr */
@@ -1164,6 +1220,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_platform_data	*pdata;
 	int				err;
 	int				i, offset;
+	dma_cap_mask_t mask;
+	unsigned sig;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1244,6 +1302,33 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		break;
 
 	case NAND_OMAP_PREFETCH_DMA:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		sig = OMAP24XX_DMA_GPMC;
+		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+		if (!info->dma) {
+			dev_warn(&pdev->dev, "DMA engine request failed\n");
+		} else {
+			struct dma_slave_config cfg;
+			int rc;
+
+			memset(&cfg, 0, sizeof(cfg));
+			cfg.src_addr = info->phys_base;
+			cfg.dst_addr = info->phys_base;
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.src_maxburst = 16;
+			cfg.dst_maxburst = 16;
+			rc = dmaengine_slave_config(info->dma, &cfg);
+			if (rc) {
+				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+					rc);
+				goto out_release_mem_region;
+			}
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+			break;
+		}
 		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
 				omap_nand_dma_cb, &info->comp, &info->dma_ch);
 		if (err < 0) {
@@ -1358,6 +1443,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	return 0;
 
 out_release_mem_region:
+	if (info->dma)
+		dma_release_channel(info->dma);
 	release_mem_region(info->phys_base, NAND_IO_SIZE);
 out_free_info:
 	kfree(info);
@@ -1376,6 +1463,9 @@ static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->dma)
+		dma_release_channel(info->dma);
+
 	if (info->gpmc_irq)
 		free_irq(info->gpmc_irq, info);
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 09/11] mtd: omap2: add DMA engine support
@ 2012-06-07 11:09       ` Russell King
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:09 UTC (permalink / raw)
  To: linux-arm-kernel

Add DMA engine support to the OMAP2 NAND driver.  This supplements the
private DMA API implementation contained within this driver, and the
driver can be independently switched at build time between using DMA
engine and the private DMA API.

Tested-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mtd/nand/omap2.c |   92 +++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 91 insertions(+), 1 deletions(-)

diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d..2912d6c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -18,6 +19,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 
@@ -123,6 +125,7 @@ struct omap_nand_info {
 	int				gpmc_cs;
 	unsigned long			phys_base;
 	struct completion		comp;
+	struct dma_chan			*dma;
 	int				dma_ch;
 	int				gpmc_irq;
 	enum {
@@ -345,6 +348,10 @@ static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	complete((struct completion *) data);
 }
+static void omap_nand_dma_callback(void *data)
+{
+	complete((struct completion *) data);
+}
 
 /*
  * omap_nand_dma_transfer: configer and start dma transfer
@@ -382,6 +389,56 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
 	}
 
+	if (info->dma) {
+		struct dma_async_tx_descriptor *tx;
+		struct scatterlist sg;
+		unsigned n;
+
+		sg_init_one(&sg, addr, len);
+		n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+		if (n == 0) {
+			dev_err(&info->pdev->dev,
+				"Couldn't DMA map a %d byte buffer\n", len);
+			goto out_copy;
+		}
+
+		tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx) {
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+		tx->callback = omap_nand_dma_callback;
+		tx->callback_param = &info->comp;
+		dmaengine_submit(tx);
+
+		/*  configure and start prefetch transfer */
+		ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+		if (ret) {
+			/* PFPW engine is busy, use cpu copy method */
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+
+		init_completion(&info->comp);
+		dma_async_issue_pending(info->dma);
+
+		/* setup and start DMA using dma_addr */
+		wait_for_completion(&info->comp);
+		tim = 0;
+		limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
+		/* disable and stop the PFPW engine */
+		gpmc_prefetch_reset(info->gpmc_cs);
+
+		dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+		return 0;
+	}
+
 	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
 	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
 		dev_err(&info->pdev->dev,
@@ -414,7 +471,6 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		goto out_copy_unmap;
 
 	init_completion(&info->comp);
-
 	omap_start_dma(info->dma_ch);
 
 	/* setup and start DMA using dma_addr */
@@ -1164,6 +1220,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_platform_data	*pdata;
 	int				err;
 	int				i, offset;
+	dma_cap_mask_t mask;
+	unsigned sig;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1244,6 +1302,33 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		break;
 
 	case NAND_OMAP_PREFETCH_DMA:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		sig = OMAP24XX_DMA_GPMC;
+		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+		if (!info->dma) {
+			dev_warn(&pdev->dev, "DMA engine request failed\n");
+		} else {
+			struct dma_slave_config cfg;
+			int rc;
+
+			memset(&cfg, 0, sizeof(cfg));
+			cfg.src_addr = info->phys_base;
+			cfg.dst_addr = info->phys_base;
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.src_maxburst = 16;
+			cfg.dst_maxburst = 16;
+			rc = dmaengine_slave_config(info->dma, &cfg);
+			if (rc) {
+				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+					rc);
+				goto out_release_mem_region;
+			}
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+			break;
+		}
 		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
 				omap_nand_dma_cb, &info->comp, &info->dma_ch);
 		if (err < 0) {
@@ -1358,6 +1443,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 	return 0;
 
 out_release_mem_region:
+	if (info->dma)
+		dma_release_channel(info->dma);
 	release_mem_region(info->phys_base, NAND_IO_SIZE);
 out_free_info:
 	kfree(info);
@@ -1376,6 +1463,9 @@ static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->dma)
+		dma_release_channel(info->dma);
+
 	if (info->gpmc_irq)
 		free_irq(info->gpmc_irq, info);
 
-- 
1.7.4.4

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 10/11] mtd: omap2: remove private DMA API implementation
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:09       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:09 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: David Woodhouse, linux-mtd

Remove the private DMA API implementation from nand/omap2.c,
making it use the DMA engine API exclusively.
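
Stripped of error handling, the only I/O path left in omap_nand_dma_transfer()
after this change is the dmaengine one; in outline it reduces to the sequence
below.  This is an illustrative sketch of the code in the diff that follows,
not a separate implementation (info, addr, len, dir and is_write are the
function's existing locals and arguments):

	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;

	sg_init_one(&sg, addr, len);
	if (dma_map_sg(info->dma->device->dev, &sg, 1, dir) == 0)
		goto out_copy;			/* fall back to the CPU copy path */

	tx = dmaengine_prep_slave_sg(info->dma, &sg, 1,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;
	tx->callback = omap_nand_dma_callback;	/* just complete(&info->comp) */
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* start the GPMC prefetch engine, then let the DMA run */
	if (gpmc_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX,
				 0x1, len, is_write))
		goto out_copy_unmap;		/* PFPW engine busy, CPU copy */
	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);
	wait_for_completion(&info->comp);

	gpmc_prefetch_reset(info->gpmc_cs);
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);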

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mtd/nand/omap2.c |  136 +++++++++-------------------------------------
 1 files changed, 26 insertions(+), 110 deletions(-)

diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 2912d6c..e9309b3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -126,7 +126,6 @@ struct omap_nand_info {
 	unsigned long			phys_base;
 	struct completion		comp;
 	struct dma_chan			*dma;
-	int				dma_ch;
 	int				gpmc_irq;
 	enum {
 		OMAP_NAND_IO_READ = 0,	/* read */
@@ -339,15 +338,9 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
 }
 
 /*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
  * @data: pointer to completion data structure
  */
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
-{
-	complete((struct completion *) data);
-}
 static void omap_nand_dma_callback(void *data)
 {
 	complete((struct completion *) data);
@@ -365,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 {
 	struct omap_nand_info *info = container_of(mtd,
 					struct omap_nand_info, mtd);
+	struct dma_async_tx_descriptor *tx;
 	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
 							DMA_FROM_DEVICE;
-	dma_addr_t dma_addr;
-	int ret;
+	struct scatterlist sg;
 	unsigned long tim, limit;
-
-	/* The fifo depth is 64 bytes max.
-	 * But configure the FIFO-threahold to 32 to get a sync at each frame
-	 * and frame length is 32 bytes.
-	 */
-	int buf_len = len >> 6;
+	unsigned n;
+	int ret;
 
 	if (addr >= high_memory) {
 		struct page *p1;
@@ -389,89 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
 	}
 
-	if (info->dma) {
-		struct dma_async_tx_descriptor *tx;
-		struct scatterlist sg;
-		unsigned n;
-
-		sg_init_one(&sg, addr, len);
-		n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
-		if (n == 0) {
-			dev_err(&info->pdev->dev,
-				"Couldn't DMA map a %d byte buffer\n", len);
-			goto out_copy;
-		}
-
-		tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
-			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!tx) {
-			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-			goto out_copy;
-		}
-		tx->callback = omap_nand_dma_callback;
-		tx->callback_param = &info->comp;
-		dmaengine_submit(tx);
-
-		/*  configure and start prefetch transfer */
-		ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
-		if (ret) {
-			/* PFPW engine is busy, use cpu copy method */
-			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-			goto out_copy;
-		}
-
-		init_completion(&info->comp);
-		dma_async_issue_pending(info->dma);
-
-		/* setup and start DMA using dma_addr */
-		wait_for_completion(&info->comp);
-		tim = 0;
-		limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
-			cpu_relax();
-
-		/* disable and stop the PFPW engine */
-		gpmc_prefetch_reset(info->gpmc_cs);
-
-		dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-		return 0;
-	}
-
-	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
-	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+	sg_init_one(&sg, addr, len);
+	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+	if (n == 0) {
 		dev_err(&info->pdev->dev,
 			"Couldn't DMA map a %d byte buffer\n", len);
 		goto out_copy;
 	}
 
-	if (is_write) {
-	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-						info->phys_base, 0, 0);
-	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-							dma_addr, 0, 0);
-	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
-	} else {
-	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
-						info->phys_base, 0, 0);
-	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
-							dma_addr, 0, 0);
-	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
-					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
-					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
-	}
-	/*  configure and start prefetch transfer */
+	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx)
+		goto out_copy_unmap;
+
+	tx->callback = omap_nand_dma_callback;
+	tx->callback_param = &info->comp;
+	dmaengine_submit(tx);
+
+	/* configure and start prefetch transfer */
 	ret = gpmc_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
 	if (ret)
 		/* PFPW engine is busy, use cpu copy method */
 		goto out_copy_unmap;
 
 	init_completion(&info->comp);
-	omap_start_dma(info->dma_ch);
+	dma_async_issue_pending(info->dma);
 
 	/* setup and start DMA using dma_addr */
 	wait_for_completion(&info->comp);
@@ -483,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	/* disable and stop the PFPW engine */
 	gpmc_prefetch_reset(info->gpmc_cs);
 
-	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 	return 0;
 
 out_copy_unmap:
-	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
 out_copy:
 	if (info->nand.options & NAND_BUSWIDTH_16)
 		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1307,7 +1240,9 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 		sig = OMAP24XX_DMA_GPMC;
 		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 		if (!info->dma) {
-			dev_warn(&pdev->dev, "DMA engine request failed\n");
+			dev_err(&pdev->dev, "DMA engine request failed\n");
+			err = -ENXIO;
+			goto out_release_mem_region;
 		} else {
 			struct dma_slave_config cfg;
 			int rc;
@@ -1327,22 +1262,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 			}
 			info->nand.read_buf   = omap_read_buf_dma_pref;
 			info->nand.write_buf  = omap_write_buf_dma_pref;
-			break;
-		}
-		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
-				omap_nand_dma_cb, &info->comp, &info->dma_ch);
-		if (err < 0) {
-			info->dma_ch = -1;
-			dev_err(&pdev->dev, "DMA request failed!\n");
-			goto out_release_mem_region;
-		} else {
-			omap_set_dma_dest_burst_mode(info->dma_ch,
-					OMAP_DMA_DATA_BURST_16);
-			omap_set_dma_src_burst_mode(info->dma_ch,
-					OMAP_DMA_DATA_BURST_16);
-
-			info->nand.read_buf   = omap_read_buf_dma_pref;
-			info->nand.write_buf  = omap_write_buf_dma_pref;
 		}
 		break;
 
@@ -1460,9 +1379,6 @@ static int omap_nand_remove(struct platform_device *pdev)
 	omap3_free_bch(&info->mtd);
 
 	platform_set_drvdata(pdev, NULL);
-	if (info->dma_ch != -1)
-		omap_free_dma(info->dma_ch);
-
 	if (info->dma)
 		dma_release_channel(info->dma);
 
-- 
1.7.4.4


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 11/11] Add feature removal of old OMAP private DMA implementation
  2012-06-07 11:06     ` Russell King - ARM Linux
@ 2012-06-07 11:09       ` Russell King
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King @ 2012-06-07 11:09 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Rob Landley, linux-doc

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 Documentation/feature-removal-schedule.txt |   11 +++++++++++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 56000b3..1f7ba35 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -612,3 +612,14 @@ When:	June 2013
 Why:	Unsupported/unmaintained/unused since 2.6
 
 ----------------------------
+
+What:	OMAP private DMA implementation
+When:	2013
+Why:	We have a DMA engine implementation; all users should be updated
+	to use this rather than persisting with the old APIs.  The old APIs
+	block merging the old DMA engine implementation into the DMA
+	engine driver.
+Who:	Russell King <linux@arm.linux.org.uk>,
+	Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
-- 
1.7.4.4
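
For a driver still using the private API, the conversion this schedule entry
asks for is essentially the one made to the NAND and MMC drivers earlier in
this series: move the channel request and the burst/width setup over to
dmaengine, and do the transfers with dmaengine_prep_slave_sg().  A rough
before/after sketch of the setup side only (the request line, widths and
bursts are the NAND values from patch 9; callback, data, chan and phys_base
stand in for driver-local names):

	/* before: private OMAP DMA API */
	int dma_ch;

	omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", callback, data, &dma_ch);
	omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_16);

	/* after: generic dmaengine API */
	struct dma_slave_config cfg = {
		.src_addr	= phys_base,
		.dst_addr	= phys_base,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
		.dst_maxburst	= 16,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	unsigned sig = OMAP24XX_DMA_GPMC;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	dmaengine_slave_config(chan, &cfg);

	/* ... transfers ... */

	dma_release_channel(chan);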


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-07 11:06       ` Russell King
@ 2012-06-07 12:40         ` S, Venkatraman
  -1 siblings, 0 replies; 172+ messages in thread
From: S, Venkatraman @ 2012-06-07 12:40 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Dan Williams, Vinod Koul

On Thu, Jun 7, 2012 at 4:36 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Tested-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  drivers/dma/Kconfig      |    6 +
>  drivers/dma/Makefile     |    1 +
>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/omap-dma.h |   24 ++
>  4 files changed, 553 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/dma/omap-dma.c
>  create mode 100644 include/linux/omap-dma.h
>
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index eb2b60e..8be3bf6 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -261,6 +261,12 @@ config DMA_SA11X0
>          SA-1110 SoCs.  This DMA engine can only be used with on-chip
>          devices.
>
> +config DMA_OMAP
> +       tristate "OMAP DMA support"
> +       depends on ARCH_OMAP
> +       select DMA_ENGINE
> +       select DMA_VIRTUAL_CHANNELS
> +
>  config DMA_ENGINE
>        bool
>
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index fc05f7d..ddc291a 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -29,3 +29,4 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
>  obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
>  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
>  obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
> +obj-$(CONFIG_DMA_OMAP) += omap-dma.o
> diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
> new file mode 100644
> index 0000000..500bc71
> --- /dev/null
> +++ b/drivers/dma/omap-dma.c
> @@ -0,0 +1,522 @@
> +/*
> + * OMAP DMAengine support
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/omap-dma.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +
> +#include "virt-dma.h"
Russell,
  I applied your entire series on 3.5-rc1 and the build fails because it can't
find virt-dma.h.
Perhaps a missed "git add"?

> +#include <plat/dma.h>
> +

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-07 12:40         ` S, Venkatraman
@ 2012-06-07 12:45           ` S, Venkatraman
  -1 siblings, 0 replies; 172+ messages in thread
From: S, Venkatraman @ 2012-06-07 12:45 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Dan Williams, Vinod Koul

On Thu, Jun 7, 2012 at 6:10 PM, S, Venkatraman <svenkatr@ti.com> wrote:
> On Thu, Jun 7, 2012 at 4:36 PM, Russell King
> <rmk+kernel@arm.linux.org.uk> wrote:
>> Tested-by: Tony Lindgren <tony@atomide.com>
>> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
>> ---
>>  drivers/dma/Kconfig      |    6 +
>>  drivers/dma/Makefile     |    1 +
>>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>>  include/linux/omap-dma.h |   24 ++
>>  4 files changed, 553 insertions(+), 0 deletions(-)
>>  create mode 100644 drivers/dma/omap-dma.c
>>  create mode 100644 include/linux/omap-dma.h
>>
>> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
>> index eb2b60e..8be3bf6 100644
>> --- a/drivers/dma/Kconfig
>> +++ b/drivers/dma/Kconfig
>> @@ -261,6 +261,12 @@ config DMA_SA11X0
>>          SA-1110 SoCs.  This DMA engine can only be used with on-chip
>>          devices.
>>
>> +config DMA_OMAP
>> +       tristate "OMAP DMA support"
>> +       depends on ARCH_OMAP
>> +       select DMA_ENGINE
>> +       select DMA_VIRTUAL_CHANNELS
>> +
>>  config DMA_ENGINE
>>        bool
>>
>> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
>> index fc05f7d..ddc291a 100644
>> --- a/drivers/dma/Makefile
>> +++ b/drivers/dma/Makefile
>> @@ -29,3 +29,4 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
>>  obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
>>  obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
>>  obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
>> +obj-$(CONFIG_DMA_OMAP) += omap-dma.o
>> diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
>> new file mode 100644
>> index 0000000..500bc71
>> --- /dev/null
>> +++ b/drivers/dma/omap-dma.c
>> @@ -0,0 +1,522 @@
>> +/*
>> + * OMAP DMAengine support
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + */
>> +#include <linux/dmaengine.h>
>> +#include <linux/dma-mapping.h>
>> +#include <linux/err.h>
>> +#include <linux/init.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/list.h>
>> +#include <linux/module.h>
>> +#include <linux/omap-dma.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/slab.h>
>> +#include <linux/spinlock.h>
>> +
>> +#include "virt-dma.h"
> Russell,
>  I applied your entire series on 3.5-rc1 and build fails as it can't
> find virt-dma.h
> Perhaps a missed "git add" ?
>
OK, I reread your messages again; these 11 are based on the generic
dma-engine series.

>> +#include <plat/dma.h>
>> +

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 09/11] mtd: omap2: add DMA engine support
  2012-06-07 11:09       ` Russell King
@ 2012-06-07 12:49         ` Artem Bityutskiy
  -1 siblings, 0 replies; 172+ messages in thread
From: Artem Bityutskiy @ 2012-06-07 12:49 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, linux-mtd, David Woodhouse

On Thu, 2012-06-07 at 12:09 +0100, Russell King wrote:
> Add DMA engine support to the OMAP2 NAND driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API.
> 
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

I guess it makes sense for this stuff to go in via the OMAP tree.

-- 
Best Regards,
Artem Bityutskiy

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 09/11] mtd: omap2: add DMA engine support
  2012-06-07 12:49         ` Artem Bityutskiy
@ 2012-06-07 13:11           ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-07 13:11 UTC (permalink / raw)
  To: Artem Bityutskiy; +Cc: linux-arm-kernel, linux-omap, linux-mtd, David Woodhouse

On Thu, Jun 07, 2012 at 03:49:35PM +0300, Artem Bityutskiy wrote:
> On Thu, 2012-06-07 at 12:09 +0100, Russell King wrote:
> > Add DMA engine support to the OMAP2 NAND driver.  This supplements the
> > private DMA API implementation contained within this driver, and the
> > driver can be independently switched at build time between using DMA
> > engine and the private DMA API.
> > 
> > Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> 
> I guess it is makes sense to make this stuff to go in via the OMAP tree.

No, it makes sense to get this stuff via a single tree all together,
because, as you can see from the thread structure, it isn't purely
an OMAP thing.

The OMAP stuff depends on a core set, as does a bunch of PL08x and
SA11x0 changes.  We can't stuff all that through the OMAP tree; that
wouldn't make any sense.

What probably should happen is that the tip of the OMAP stuff gets
pulled by Tony into his tree, and we share those commits between my
tree and his - and then it doesn't matter what goes in when and by
whom.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 09/11] mtd: omap2: add DMA engine support
  2012-06-07 13:11           ` Russell King - ARM Linux
@ 2012-06-07 13:28             ` Artem Bityutskiy
  -1 siblings, 0 replies; 172+ messages in thread
From: Artem Bityutskiy @ 2012-06-07 13:28 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: linux-arm-kernel, linux-omap, linux-mtd, David Woodhouse

On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> No, it makes sense to get this stuff via a single tree all together,
> because, as you can see from the thread structure, it isn't purely
> an OMAP thing.
> 
> The OMAP stuff depends on a core set, as does a bunch of PL08x and
> SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> wouldn't make any sense.
> 
> What probably should happen is that the tip of the OMAP stuff gets
> pulled by Tony into his tree, and we share those commits between my
> tree and his - and then it doesn't matter what goes in when and by
> whom.

Oh, sure, sorry, I actually wanted to say that these two patches should
_not_ go in via the MTD tree.

-- 
Best Regards,
Artem Bityutskiy

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 02/11] mmc: omap_hsmmc: add DMA engine support
  2012-06-07 11:06       ` Russell King
@ 2012-06-07 17:04         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:04 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Chris Ball, linux-mmc

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Add DMA engine support to the OMAP HSMMC driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
> 
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Gave this a quick boot test on 2430sdp, zoom3, n900 and blaze:

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 02/11] mmc: omap_hsmmc: add DMA engine support
@ 2012-06-07 17:04         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:04 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Add DMA engine support to the OMAP HSMMC driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
> 
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Gave this a quick boot test on 2430sdp, zoom3, n900 and blaze:

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
  2012-06-07 11:07       ` Russell King
@ 2012-06-07 17:04         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:04 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Chris Ball, linux-mmc

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
@ 2012-06-07 17:04         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:04 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 04/11] mmc: omap: add DMA engine support
  2012-06-07 11:07       ` Russell King
@ 2012-06-07 17:05         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:05 UTC (permalink / raw)
  To: Russell King
  Cc: linux-arm-kernel, linux-omap, Jarkko Lavinen, Chris Ball, linux-mmc

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Add DMA engine support to the OMAP driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This I boot tested on 770 and N800:

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 04/11] mmc: omap: add DMA engine support
@ 2012-06-07 17:05         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:05 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:11]:
> Add DMA engine support to the OMAP driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This I boot tested on 770 and N800:

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 05/11] mmc: omap: remove private DMA API implementation
  2012-06-07 11:07       ` Russell King
@ 2012-06-07 17:05         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:05 UTC (permalink / raw)
  To: Russell King
  Cc: linux-arm-kernel, linux-omap, Jarkko Lavinen, Chris Ball, linux-mmc

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:12]:
> Remove the private DMA API implementation from omap, making it use
> entirely the DMA engine API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 05/11] mmc: omap: remove private DMA API implementation
@ 2012-06-07 17:05         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:05 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:12]:
> Remove the private DMA API implementation from omap, making it use
> entirely the DMA engine API.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 06/11] ARM: omap: remove mmc platform data dma_mask and initialization
  2012-06-07 11:08       ` Russell King
@ 2012-06-07 17:06         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:06 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:12]:
> DMAengine uses the DMA engine device structure when mapping/unmapping
> memory for DMA, so the MMC devices do not need their DMA masks
> initialized (this reflects hardware: the MMC device is not the device
> doing DMA.)
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 06/11] ARM: omap: remove mmc platform data dma_mask and initialization
@ 2012-06-07 17:06         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:06 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:12]:
> DMAengine uses the DMA engine device structure when mapping/unmapping
> memory for DMA, so the MMC devices do not need their DMA masks
> initialized (this reflects hardware: the MMC device is not the device
> doing DMA.)
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested-by: Tony Lindgren <tony@atomide.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 11/11] Add feature removal of old OMAP private DMA implementation
  2012-06-07 11:09       ` Russell King
@ 2012-06-07 17:07         ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:07 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Rob Landley, linux-doc

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:14]:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Acked-by: Tony Lindgren <tony@atomide.com>

> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:	June 2013
>  Why:	Unsupported/unmaintained/unused since 2.6
>  
>  ----------------------------
> +
> +What:	OMAP private DMA implementation
> +When:	2013
> +Why:	We have a DMA engine implementation; all users should be updated
> +	to use this rather than persisting with the old APIs.  The old APIs
> +	block merging the old DMA engine implementation into the DMA
> +	engine driver.
> +Who:	Russell King <linux@arm.linux.org.uk>,
> +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------
> -- 
> 1.7.4.4
> 

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 11/11] Add feature removal of old OMAP private DMA implementation
@ 2012-06-07 17:07         ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:07 UTC (permalink / raw)
  To: linux-arm-kernel

* Russell King <rmk+kernel@arm.linux.org.uk> [120607 04:14]:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Acked-by: Tony Lindgren <tony@atomide.com>

> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:	June 2013
>  Why:	Unsupported/unmaintained/unused since 2.6
>  
>  ----------------------------
> +
> +What:	OMAP private DMA implementation
> +When:	2013
> +Why:	We have a DMA engine implementation; all users should be updated
> +	to use this rather than persisting with the old APIs.  The old APIs
> +	block merging the old DMA engine implementation into the DMA
> +	engine driver.
> +Who:	Russell King <linux@arm.linux.org.uk>,
> +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------
> -- 
> 1.7.4.4
> 

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 09/11] mtd: omap2: add DMA engine support
  2012-06-07 13:28             ` Artem Bityutskiy
  (?)
@ 2012-06-07 17:10               ` Tony Lindgren
  -1 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:10 UTC (permalink / raw)
  To: Artem Bityutskiy
  Cc: Russell King - ARM Linux, linux-arm-kernel, linux-omap,
	linux-mtd, David Woodhouse

* Artem Bityutskiy <dedekind1@gmail.com> [120607 06:28]:
> On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> > No, it makes sense to get this stuff via a single tree all together,
> > because, as you can see from the thread structure, it isn't purely
> > an OMAP thing.
> > 
> > The OMAP stuff depends on a core set, as does a bunch of PL08x and
> > SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> > wouldn't make any sense.
> > 
> > What probably should happen is that the tip of the OMAP stuff gets
> > pulled by Tony into his tree, and we share those commits between my
> > tree and his - and then it doesn't matter what goes in when and by
> > whom.
> 
> Oh, sure, sorry, I actually wanted to say that these two patches should
> _not_ go via the MTD tree.

What Russell is suggesting works well for me.

Regards,

Tony

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 09/11] mtd: omap2: add DMA engine support
@ 2012-06-07 17:10               ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:10 UTC (permalink / raw)
  To: Artem Bityutskiy
  Cc: linux-mtd, linux-omap, Russell King - ARM Linux, David Woodhouse,
	linux-arm-kernel

* Artem Bityutskiy <dedekind1@gmail.com> [120607 06:28]:
> On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> > No, it makes sense to get this stuff via a single tree all together,
> > because, as you can see from the thread structure, it isn't purely
> > an OMAP thing.
> > 
> > The OMAP stuff depends on a core set, as does a bunch of PL08x and
> > SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> > wouldn't make any sense.
> > 
> > What probably should happen is that the tip of the OMAP stuff gets
> > pulled by Tony into his tree, and we share those commits between my
> > tree and his - and then it doesn't matter what goes in when and by
> > whom.
> 
> Oh, sure, sorry, I actually wanted to say that these two patches should
> _not_ go via the MTD tree.

What Russell is suggesting works well for me.

Regards,

Tony

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 09/11] mtd: omap2: add DMA engine support
@ 2012-06-07 17:10               ` Tony Lindgren
  0 siblings, 0 replies; 172+ messages in thread
From: Tony Lindgren @ 2012-06-07 17:10 UTC (permalink / raw)
  To: linux-arm-kernel

* Artem Bityutskiy <dedekind1@gmail.com> [120607 06:28]:
> On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> > No, it makes sense to get this stuff via a single tree all together,
> > because, as you can see from the thread structure, it isn't purely
> > an OMAP thing.
> > 
> > The OMAP stuff depends on a core set, as does a bunch of PL08x and
> > SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> > wouldn't make any sense.
> > 
> > What probably should happen is that the tip of the OMAP stuff gets
> > pulled by Tony into his tree, and we share those commits between my
> > tree and his - and then it doesn't matter what goes in when and by
> > whom.
> 
> Oh, sure, sorry, I actually wanted to say that these two patches should
> _not_ go via the MTD tree.

What Russell is suggesting works well for me.

Regards,

Tony

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
  2012-06-07 11:07       ` Russell King
@ 2012-06-07 17:53         ` S, Venkatraman
  -1 siblings, 0 replies; 172+ messages in thread
From: S, Venkatraman @ 2012-06-07 17:53 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Chris Ball, linux-mmc

On Thu, Jun 7, 2012 at 4:37 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested this on 4430SDP with rootfs usage, untarring the kernel source
and compiling it natively.

Tested-by: Venkatraman S <svenkatr@ti.com>

> ---
>  drivers/mmc/host/omap_hsmmc.c |  265 ++++++++++-------------------------------
>  1 files changed, 64 insertions(+), 201 deletions(-)
>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
@ 2012-06-07 17:53         ` S, Venkatraman
  0 siblings, 0 replies; 172+ messages in thread
From: S, Venkatraman @ 2012-06-07 17:53 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 4:37 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Tested this on 4430SDP with rootfs usage, untarring the kernel source
and compiling it natively.

Tested-by: Venkatraman S <svenkatr@ti.com>

> ---
>  drivers/mmc/host/omap_hsmmc.c |  265 ++++++++++-------------------------------
>  1 files changed, 64 insertions(+), 201 deletions(-)
>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 11/11] Add feature removal of old OMAP private DMA implementation
  2012-06-07 11:09       ` Russell King
@ 2012-06-08  6:10         ` Shilimkar, Santosh
  -1 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08  6:10 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Rob Landley, linux-doc

On Thu, Jun 7, 2012 at 4:39 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
>
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:      June 2013
>  Why:   Unsupported/unmaintained/unused since 2.6
>
>  ----------------------------
> +
> +What:  OMAP private DMA implementation
> +When:  2013
> +Why:   We have a DMA engine implementation; all users should be updated
> +       to use this rather than persisting with the old APIs.  The old APIs
> +       block merging the old DMA engine implementation into the DMA
> +       engine driver.
> +Who:   Russell King <linux@arm.linux.org.uk>,
> +       Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------
> --

Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 11/11] Add feature removal of old OMAP private DMA implementation
@ 2012-06-08  6:10         ` Shilimkar, Santosh
  0 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08  6:10 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 4:39 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
>
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:      June 2013
>  Why:   Unsupported/unmaintained/unused since 2.6
>
>  ----------------------------
> +
> +What:  OMAP private DMA implementation
> +When:  2013
> +Why:   We have a DMA engine implementation; all users should be updated
> +       to use this rather than persisting with the old APIs.  The old APIs
> +       block merging the old DMA engine implementation into the DMA
> +       engine driver.
> +Who:   Russell King <linux@arm.linux.org.uk>,
> +       Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------
> --

Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-07 11:06       ` Russell King
@ 2012-06-08  6:19         ` Shilimkar, Santosh
  -1 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08  6:19 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Dan Williams, Vinod Koul

On Thu, Jun 7, 2012 at 4:36 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Tested-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
I have scanned mainly the OMAP DMA-related features in the patch,
and they look fine to me.

Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 01/11] dmaengine: add OMAP DMA engine driver
@ 2012-06-08  6:19         ` Shilimkar, Santosh
  0 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08  6:19 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 4:36 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:
> Tested-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
I have scanned mainly the OMAP DMA-related features in the patch,
and they look fine to me.

Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT] PL08x patches
  2012-06-07 10:45     ` Russell King - ARM Linux
@ 2012-06-08  8:32       ` Linus Walleij
  -1 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:32 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: linux-arm-kernel, linux-omap, Vinod Koul, Dan Williams

On Thu, Jun 7, 2012 at 12:45 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:

> Here's the PL08x patches.

I've tested the entire set: first the three patches splitting out the virtual
channels, then the remaining 31 patches on PL08x.

The test was performed on a Nomadik S8815 which has a pretty
much vanilla PL08x block.

The DMAengine memcpy test completes just fine and behaves as
before.
Tested-by: Linus Walleij <linus.walleij@linaro.org>

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT] PL08x patches
@ 2012-06-08  8:32       ` Linus Walleij
  0 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:32 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 12:45 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:

> Here's the PL08x patches.

I've tested the entire set: first the three patches splitting out the virtual
channels, then the remaining 31 patches on PL08x.

The test was performed on a Nomadik S8815 which has a pretty
much vanilla PL08x block.

The DMAengine memcpy test completes just fine and behaves as
before.
Tested-by: Linus Walleij <linus.walleij@linaro.org>

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-07 11:08       ` Russell King
@ 2012-06-08  8:50         ` Linus Walleij
  -1 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:50 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, spi-devel-general

On Thu, Jun 7, 2012 at 1:08 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP SPI driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API for the transmit and receive sides.
>
> Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> Acked-by: Grant Likely <grant.likely@secretlab.ca>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This looks very good,
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
@ 2012-06-08  8:50         ` Linus Walleij
  0 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:50 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 1:08 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP SPI driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API for the transmit and receive sides.
>
> Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> Acked-by: Grant Likely <grant.likely@secretlab.ca>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This looks very good,
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 04/11] mmc: omap: add DMA engine support
  2012-06-07 11:07       ` Russell King
@ 2012-06-08  8:52         ` Linus Walleij
  -1 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:52 UTC (permalink / raw)
  To: Russell King
  Cc: linux-arm-kernel, linux-omap, Jarkko Lavinen, Chris Ball, linux-mmc

On Thu, Jun 7, 2012 at 1:07 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This looks good as well:
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 04/11] mmc: omap: add DMA engine support
@ 2012-06-08  8:52         ` Linus Walleij
  0 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:52 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 1:07 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

This looks good as well:
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 02/11] mmc: omap_hsmmc: add DMA engine support
  2012-06-07 11:06       ` Russell King
@ 2012-06-08  8:53         ` Linus Walleij
  -1 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:53 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Chris Ball, linux-mmc

On Thu, Jun 7, 2012 at 1:06 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP HSMMC driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
>
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Looks good:
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 02/11] mmc: omap_hsmmc: add DMA engine support
@ 2012-06-08  8:53         ` Linus Walleij
  0 siblings, 0 replies; 172+ messages in thread
From: Linus Walleij @ 2012-06-08  8:53 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 7, 2012 at 1:06 PM, Russell King
<rmk+kernel@arm.linux.org.uk> wrote:

> Add DMA engine support to the OMAP HSMMC driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be switched at build time between using DMA engine and the
> private DMA API.
>
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Looks good:
Acked-by: Linus Walleij <linus.walleij@linaro.org>

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-07 11:06       ` Russell King
@ 2012-06-08  9:02         ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-08  9:02 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Vinod Koul, Dan Williams

On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
> Tested-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  drivers/dma/Kconfig      |    6 +
>  drivers/dma/Makefile     |    1 +
>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/omap-dma.h |   24 ++
>  4 files changed, 553 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/dma/omap-dma.c
>  create mode 100644 include/linux/omap-dma.h

There is a bug in here which no one has spotted... I just noticed it.

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 500bc71..02eb2fd 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 	d = c->desc;
-	if (!d)
-		return;
-
-	if (++c->sgidx < d->sglen) {
-		omap_dma_start_sg(c, d, c->sgidx);
-	} else {
-		omap_dma_start_desc(c);
-		vchan_cookie_complete(&d->vd);
+	if (d) {
+		if (++c->sgidx < d->sglen) {
+			omap_dma_start_sg(c, d, c->sgidx);
+		} else {
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 }


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 01/11] dmaengine: add OMAP DMA engine driver
@ 2012-06-08  9:02         ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-08  9:02 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
> Tested-by: Tony Lindgren <tony@atomide.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  drivers/dma/Kconfig      |    6 +
>  drivers/dma/Makefile     |    1 +
>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/omap-dma.h |   24 ++
>  4 files changed, 553 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/dma/omap-dma.c
>  create mode 100644 include/linux/omap-dma.h

There is a bug in here which no one has spotted... I just noticed it.

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 500bc71..02eb2fd 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
 
 	spin_lock_irqsave(&c->vc.lock, flags);
 	d = c->desc;
-	if (!d)
-		return;
-
-	if (++c->sgidx < d->sglen) {
-		omap_dma_start_sg(c, d, c->sgidx);
-	} else {
-		omap_dma_start_desc(c);
-		vchan_cookie_complete(&d->vd);
+	if (d) {
+		if (++c->sgidx < d->sglen) {
+			omap_dma_start_sg(c, d, c->sgidx);
+		} else {
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 }

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-08  9:02         ` Russell King - ARM Linux
@ 2012-06-08 10:00           ` Shilimkar, Santosh
  -1 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08 10:00 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: linux-arm-kernel, linux-omap, Vinod Koul, Dan Williams

On Fri, Jun 8, 2012 at 2:32 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
>> Tested-by: Tony Lindgren <tony@atomide.com>
>> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
>> ---
>>  drivers/dma/Kconfig      |    6 +
>>  drivers/dma/Makefile     |    1 +
>>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>>  include/linux/omap-dma.h |   24 ++
>>  4 files changed, 553 insertions(+), 0 deletions(-)
>>  create mode 100644 drivers/dma/omap-dma.c
>>  create mode 100644 include/linux/omap-dma.h
>
> There is a bug in here which no one has spotted... I just noticed it.
>
> diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
> index 500bc71..02eb2fd 100644
> --- a/drivers/dma/omap-dma.c
> +++ b/drivers/dma/omap-dma.c
> @@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
>
>        spin_lock_irqsave(&c->vc.lock, flags);
>        d = c->desc;
> -       if (!d)
> -               return;
> -
> -       if (++c->sgidx < d->sglen) {
> -               omap_dma_start_sg(c, d, c->sgidx);
> -       } else {
> -               omap_dma_start_desc(c);
> -               vchan_cookie_complete(&d->vd);
> +       if (d) {
> +               if (++c->sgidx < d->sglen) {
> +                       omap_dma_start_sg(c, d, c->sgidx);
> +               } else {
> +                       omap_dma_start_desc(c);
> +                       vchan_cookie_complete(&d->vd);
> +               }
>        }
>        spin_unlock_irqrestore(&c->vc.lock, flags);
>  }
>
You mean the lock release, right?

Regards
Santosh

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 01/11] dmaengine: add OMAP DMA engine driver
@ 2012-06-08 10:00           ` Shilimkar, Santosh
  0 siblings, 0 replies; 172+ messages in thread
From: Shilimkar, Santosh @ 2012-06-08 10:00 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jun 8, 2012 at 2:32 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
>> Tested-by: Tony Lindgren <tony@atomide.com>
>> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
>> ---
>>  drivers/dma/Kconfig      |    6 +
>>  drivers/dma/Makefile     |    1 +
>>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
>>  include/linux/omap-dma.h |   24 ++
>>  4 files changed, 553 insertions(+), 0 deletions(-)
>>  create mode 100644 drivers/dma/omap-dma.c
>>  create mode 100644 include/linux/omap-dma.h
>
> There is a bug in here which no one has spotted... I just noticed it.
>
> diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
> index 500bc71..02eb2fd 100644
> --- a/drivers/dma/omap-dma.c
> +++ b/drivers/dma/omap-dma.c
> @@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
>
>        spin_lock_irqsave(&c->vc.lock, flags);
>        d = c->desc;
> -       if (!d)
> -               return;
> -
> -       if (++c->sgidx < d->sglen) {
> -               omap_dma_start_sg(c, d, c->sgidx);
> -       } else {
> -               omap_dma_start_desc(c);
> -               vchan_cookie_complete(&d->vd);
> +       if (d) {
> +               if (++c->sgidx < d->sglen) {
> +                       omap_dma_start_sg(c, d, c->sgidx);
> +               } else {
> +                       omap_dma_start_desc(c);
> +                       vchan_cookie_complete(&d->vd);
> +               }
>        }
>        spin_unlock_irqrestore(&c->vc.lock, flags);
>  }
>
You mean the lock release, right?

Regards
Santosh

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 01/11] dmaengine: add OMAP DMA engine driver
  2012-06-08 10:00           ` Shilimkar, Santosh
@ 2012-06-08 10:01             ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-08 10:01 UTC (permalink / raw)
  To: Shilimkar, Santosh; +Cc: linux-arm-kernel, linux-omap, Vinod Koul, Dan Williams

On Fri, Jun 08, 2012 at 03:30:23PM +0530, Shilimkar, Santosh wrote:
> On Fri, Jun 8, 2012 at 2:32 PM, Russell King - ARM Linux
> <linux@arm.linux.org.uk> wrote:
> > On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
> >> Tested-by: Tony Lindgren <tony@atomide.com>
> >> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> >> ---
> >>  drivers/dma/Kconfig      |    6 +
> >>  drivers/dma/Makefile     |    1 +
> >>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
> >>  include/linux/omap-dma.h |   24 ++
> >>  4 files changed, 553 insertions(+), 0 deletions(-)
> >>  create mode 100644 drivers/dma/omap-dma.c
> >>  create mode 100644 include/linux/omap-dma.h
> >
> > There is a bug in here which no one has spotted... I just noticed it.
> >
> > diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
> > index 500bc71..02eb2fd 100644
> > --- a/drivers/dma/omap-dma.c
> > +++ b/drivers/dma/omap-dma.c
> > @@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
> >
> >        spin_lock_irqsave(&c->vc.lock, flags);
> >        d = c->desc;
> > -       if (!d)
> > -               return;
> > -
> > -       if (++c->sgidx < d->sglen) {
> > -               omap_dma_start_sg(c, d, c->sgidx);
> > -       } else {
> > -               omap_dma_start_desc(c);
> > -               vchan_cookie_complete(&d->vd);
> > +       if (d) {
> > +               if (++c->sgidx < d->sglen) {
> > +                       omap_dma_start_sg(c, d, c->sgidx);
> > +               } else {
> > +                       omap_dma_start_desc(c);
> > +                       vchan_cookie_complete(&d->vd);
> > +               }
> >        }
> >        spin_unlock_irqrestore(&c->vc.lock, flags);
> >  }
> >
> You mean the lock release, right?

Yes, if c->desc is NULL, we exit this function with the lock held,
which will lead to a lockup.
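
As a general illustration of the rule the fix restores (a sketch only, with
a made-up type standing in for the driver's channel struct, not the actual
driver code): every path out of the callback has to drop the lock taken at
the top, and a single exit label makes the "no descriptor" bail-out hard to
get wrong.

#include <linux/spinlock.h>

/* Invented stand-in for the driver's channel; illustration only. */
struct chan_sketch {
	spinlock_t lock;
	void *desc;
};

static void callback_sketch(struct chan_sketch *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (!c->desc)
		goto out;	/* bail out, but still unlock below */

	/* ... advance the scatterlist or complete the descriptor ... */
out:
	spin_unlock_irqrestore(&c->lock, flags);
}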

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 01/11] dmaengine: add OMAP DMA engine driver
@ 2012-06-08 10:01             ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-08 10:01 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jun 08, 2012 at 03:30:23PM +0530, Shilimkar, Santosh wrote:
> On Fri, Jun 8, 2012 at 2:32 PM, Russell King - ARM Linux
> <linux@arm.linux.org.uk> wrote:
> > On Thu, Jun 07, 2012 at 12:06:32PM +0100, Russell King wrote:
> >> Tested-by: Tony Lindgren <tony@atomide.com>
> >> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> >> ---
> >>  drivers/dma/Kconfig      |    6 +
> >>  drivers/dma/Makefile     |    1 +
> >>  drivers/dma/omap-dma.c   |  522 ++++++++++++++++++++++++++++++++++++++++++++++
> >>  include/linux/omap-dma.h |   24 ++
> >>  4 files changed, 553 insertions(+), 0 deletions(-)
> >>  create mode 100644 drivers/dma/omap-dma.c
> >>  create mode 100644 include/linux/omap-dma.h
> >
> > There is a bug in here which no one has spotted... I just noticed it.
> >
> > diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
> > index 500bc71..02eb2fd 100644
> > --- a/drivers/dma/omap-dma.c
> > +++ b/drivers/dma/omap-dma.c
> > @@ -130,14 +130,13 @@ static void omap_dma_callback(int ch, u16 status, void *data)
> >
> >        spin_lock_irqsave(&c->vc.lock, flags);
> >        d = c->desc;
> > -       if (!d)
> > -               return;
> > -
> > -       if (++c->sgidx < d->sglen) {
> > -               omap_dma_start_sg(c, d, c->sgidx);
> > -       } else {
> > -               omap_dma_start_desc(c);
> > -               vchan_cookie_complete(&d->vd);
> > +       if (d) {
> > +               if (++c->sgidx < d->sglen) {
> > +                       omap_dma_start_sg(c, d, c->sgidx);
> > +               } else {
> > +                       omap_dma_start_desc(c);
> > +                       vchan_cookie_complete(&d->vd);
> > +               }
> >        }
> >        spin_unlock_irqrestore(&c->vc.lock, flags);
> >  }
> >
> You mean the lock release, right?

Yes, if c->desc is NULL, we exit this function with the lock held,
which will lead to a lockup.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 11/11] Add feature removal of old OMAP private DMA implementation
  2012-06-07 11:09       ` Russell King
@ 2012-06-08 18:37         ` Rob Landley
  -1 siblings, 0 replies; 172+ messages in thread
From: Rob Landley @ 2012-06-08 18:37 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, linux-doc

On 06/07/2012 06:09 AM, Russell King wrote:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:	June 2013
>  Why:	Unsupported/unmaintained/unused since 2.6
>  
>  ----------------------------
> +
> +What:	OMAP private DMA implementation
> +When:	2013
> +Why:	We have a DMA engine implementation; all users should be updated
> +	to use this rather than persisting with the old APIs.  The old APIs
> +	block merging the old DMA engine implementation into the DMA
> +	engine driver.
> +Who:	Russell King <linux@arm.linux.org.uk>,
> +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------

Whose tree do feature-removal-schedule patches go in through?

(They're not really documentation, they're design coordination/logistics.)

Rob

-- 
GNU/Linux isn't: Linux=GPLv2, GNU=GPLv3+, they can't share code.
Either it's "mere aggregation", or a license violation.  Pick one.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 11/11] Add feature removal of old OMAP private DMA implementation
@ 2012-06-08 18:37         ` Rob Landley
  0 siblings, 0 replies; 172+ messages in thread
From: Rob Landley @ 2012-06-08 18:37 UTC (permalink / raw)
  To: linux-arm-kernel

On 06/07/2012 06:09 AM, Russell King wrote:
> Acked-by: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  Documentation/feature-removal-schedule.txt |   11 +++++++++++
>  1 files changed, 11 insertions(+), 0 deletions(-)
> 
> diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> index 56000b3..1f7ba35 100644
> --- a/Documentation/feature-removal-schedule.txt
> +++ b/Documentation/feature-removal-schedule.txt
> @@ -612,3 +612,14 @@ When:	June 2013
>  Why:	Unsupported/unmaintained/unused since 2.6
>  
>  ----------------------------
> +
> +What:	OMAP private DMA implementation
> +When:	2013
> +Why:	We have a DMA engine implementation; all users should be updated
> +	to use this rather than persisting with the old APIs.  The old APIs
> +	block merging the old DMA engine implementation into the DMA
> +	engine driver.
> +Who:	Russell King <linux@arm.linux.org.uk>,
> +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> +
> +----------------------------

Whose tree do feature-removal-schedule patches go in through?

(They're not really documentation, they're design coordination/logistics.)

Rob

-- 
GNU/Linux isn't: Linux=GPLv2, GNU=GPLv3+, they can't share code.
Either it's "mere aggregation", or a license violation.  Pick one.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 11/11] Add feature removal of old OMAP private DMA implementation
  2012-06-08 18:37         ` Rob Landley
@ 2012-06-09  8:32           ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-09  8:32 UTC (permalink / raw)
  To: Rob Landley; +Cc: linux-arm-kernel, linux-omap, linux-doc

On Fri, Jun 08, 2012 at 01:37:11PM -0500, Rob Landley wrote:
> On 06/07/2012 06:09 AM, Russell King wrote:
> > Acked-by: Linus Walleij <linus.walleij@linaro.org>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> > ---
> >  Documentation/feature-removal-schedule.txt |   11 +++++++++++
> >  1 files changed, 11 insertions(+), 0 deletions(-)
> > 
> > diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> > index 56000b3..1f7ba35 100644
> > --- a/Documentation/feature-removal-schedule.txt
> > +++ b/Documentation/feature-removal-schedule.txt
> > @@ -612,3 +612,14 @@ When:	June 2013
> >  Why:	Unsupported/unmaintained/unused since 2.6
> >  
> >  ----------------------------
> > +
> > +What:	OMAP private DMA implementation
> > +When:	2013
> > +Why:	We have a DMA engine implementation; all users should be updated
> > +	to use this rather than persisting with the old APIs.  The old APIs
> > +	block merging the old DMA engine implementation into the DMA
> > +	engine driver.
> > +Who:	Russell King <linux@arm.linux.org.uk>,
> > +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> > +
> > +----------------------------
> 
> Whose tree do feature-removal-schedule patches go in through?
> 
> (They're not really documentation, they're design coordination/logistics.)

I don't think there is any specific tree.

It would also be silly to split it from this patch set; if it were to be
split, there would need to be coordination with the rest of the patch set
to ensure this change didn't go in without the rest - that would not
make sense.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 11/11] Add feature removal of old OMAP private DMA implementation
@ 2012-06-09  8:32           ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-09  8:32 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jun 08, 2012 at 01:37:11PM -0500, Rob Landley wrote:
> On 06/07/2012 06:09 AM, Russell King wrote:
> > Acked-by: Linus Walleij <linus.walleij@linaro.org>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> > ---
> >  Documentation/feature-removal-schedule.txt |   11 +++++++++++
> >  1 files changed, 11 insertions(+), 0 deletions(-)
> > 
> > diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
> > index 56000b3..1f7ba35 100644
> > --- a/Documentation/feature-removal-schedule.txt
> > +++ b/Documentation/feature-removal-schedule.txt
> > @@ -612,3 +612,14 @@ When:	June 2013
> >  Why:	Unsupported/unmaintained/unused since 2.6
> >  
> >  ----------------------------
> > +
> > +What:	OMAP private DMA implementation
> > +When:	2013
> > +Why:	We have a DMA engine implementation; all users should be updated
> > +	to use this rather than persisting with the old APIs.  The old APIs
> > +	block merging the old DMA engine implementation into the DMA
> > +	engine driver.
> > +Who:	Russell King <linux@arm.linux.org.uk>,
> > +	Santosh Shilimkar <santosh.shilimkar@ti.com>
> > +
> > +----------------------------
> 
> Whose tree do feature-removal-schedule patches go in through?
> 
> (They're not really documentation, they're design coordination/logistics.)

I don't think there is any specific tree.

It would also be silly to split it from this patch set; if it were to be
split, there would need to be coordination with the rest of the patch set
to ensure this change didn't go in without the rest - that would not
make sense.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 05/31] dmaengine: PL08x: clean up get_signal/put_signal
  2012-06-07 10:47       ` Russell King
@ 2012-06-10 10:03         ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-10 10:03 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Vinod Koul, Dan Williams

On Thu, Jun 07, 2012 at 11:47:23AM +0100, Russell King wrote:
> Try to avoid dereferencing the DMA engine's channel struct in these
> platform helpers; instead, pass a pointer to the channel data into
> get_signal(), and the returned signal number to put_signal().
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

The Spear stuff breaks as a result of this patch, so it needs the patch
below combined with this one to prevent that breakage occurring.

diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index e14a3e4..1786eeb 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -14,8 +14,8 @@
 #ifndef __PLAT_PL080_H
 #define __PLAT_PL080_H
 
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
 
 #endif /* __PLAT_PL080_H */
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index a56a067..08bec1a 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -27,9 +27,8 @@ struct {
 	unsigned char val;
 } signals[16] = {{0, 0}, };
 
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned int signal = cd->min_signal, val;
 	unsigned long flags;
 
@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
 	return signal;
 }
 
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lock, flags);
 
 	/* if signal is not used */
-	if (!signals[cd->min_signal].busy)
+	if (!signals[signal].busy)
 		BUG();
 
-	signals[cd->min_signal].busy--;
+	signals[signal].busy--;
 
 	spin_unlock_irqrestore(&lock, flags);
 }
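
For illustration, a sketch of how a caller is expected to use the reworked
helpers (the function below is invented for this example, and the negative
return on failure is an assumption, not taken from the driver): the caller
keeps the signal number returned by pl080_get_signal() and hands exactly
that number back to pl080_put_signal(), so the platform code no longer
needs to reach into a pl08x_dma_chan.

#include <linux/amba/pl08x.h>
#include <plat/pl080.h>

static int sketch_use_signal(const struct pl08x_channel_data *cd)
{
	int signal = pl080_get_signal(cd);

	if (signal < 0)			/* assumed: negative on failure */
		return signal;

	/* ... set up and run the transfer on this request signal ... */

	pl080_put_signal(cd, signal);	/* hand back the same number */
	return 0;
}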

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 05/31] dmaengine: PL08x: clean up get_signal/put_signal
@ 2012-06-10 10:03         ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-10 10:03 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 07, 2012 at 11:47:23AM +0100, Russell King wrote:
> Try to avoid dereferencing the DMA engine's channel struct in these
> platform helpers; instead, pass a pointer to the channel data into
> get_signal(), and the returned signal number to put_signal().
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

The Spear stuff breaks as a result of this patch, so it needs the patch
below combined with this one to prevent that breakage occurring.

diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index e14a3e4..1786eeb 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -14,8 +14,8 @@
 #ifndef __PLAT_PL080_H
 #define __PLAT_PL080_H
 
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
 
 #endif /* __PLAT_PL080_H */
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index a56a067..08bec1a 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -27,9 +27,8 @@ struct {
 	unsigned char val;
 } signals[16] = {{0, 0}, };
 
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned int signal = cd->min_signal, val;
 	unsigned long flags;
 
@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
 	return signal;
 }
 
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lock, flags);
 
 	/* if signal is not used */
-	if (!signals[cd->min_signal].busy)
+	if (!signals[signal].busy)
 		BUG();
 
-	signals[cd->min_signal].busy--;
+	signals[signal].busy--;
 
 	spin_unlock_irqrestore(&lock, flags);
 }

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-07 11:08       ` Russell King
@ 2012-06-14 11:53         ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 11:53 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Grant Likely, spi-devel-general

On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> Add DMA engine support to the OMAP SPI driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API for the transmit and receive sides.
> 
> Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> Acked-by: Grant Likely <grant.likely@secretlab.ca>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Right, now that we have working OMAP in mainline again...

------------[ cut here ]------------
WARNING: at lib/dma-debug.c:865 check_unmap+0x1a0/0x6f8()
ks8851 spi1.0: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x000000009f5c3002] [size=592 bytes]
Modules linked in:
Backtrace:
[<c0017dd0>] (dump_backtrace+0x0/0x10c) from [<c0346870>] (dump_stack+0x18/0x1c)
 r7:df79fd78 r6:c01a2200 r5:c04036bd r4:00000361
[<c0346858>] (dump_stack+0x0/0x1c) from [<c0033c48>] (warn_slowpath_common+0x58/0x70)
[<c0033bf0>] (warn_slowpath_common+0x0/0x70) from [<c0033d04>] (warn_slowpath_fmt+0x38/0x40)
 r8:df79fdf8 r7:00000000 r6:00000250 r5:00000000 r4:9f5c3002
[<c0033ccc>] (warn_slowpath_fmt+0x0/0x40) from [<c01a2200>] (check_unmap+0x1a0/0x6f8)
 r3:c040ea22 r2:c0403a0f
[<c01a2060>] (check_unmap+0x0/0x6f8) from [<c01a2978>] (debug_dma_unmap_page+0x74/0x80)
[<c01a2904>] (debug_dma_unmap_page+0x0/0x80) from [<c021bd64>] (omap2_mcspi_txrx_dma+0x33c/0x54c)
[<c021ba28>] (omap2_mcspi_txrx_dma+0x0/0x54c) from [<c021c590>] (omap2_mcspi_work+0x1b8/0x2b8)
[<c021c3d8>] (omap2_mcspi_work+0x0/0x2b8) from [<c021c974>] (omap2_mcspi_transfer_one_message+0x2e4/0x310)
[<c021c690>] (omap2_mcspi_transfer_one_message+0x0/0x310) from [<c021a9f8>] (spi_pump_messages+0x130/0x154)
[<c021a8c8>] (spi_pump_messages+0x0/0x154) from [<c00508dc>] (kthread_worker_fn+0x108/0x188)
 r7:df6a3d94 r6:df79e000 r5:df6a3d90 r4:df6a3da4
[<c00507d4>] (kthread_worker_fn+0x0/0x188) from [<c0050a60>] (kthread+0x8c/0x98)
[<c00509d4>] (kthread+0x0/0x98) from [<c0039134>] (do_exit+0x0/0x314)
 r7:00000013 r6:c0039134 r5:c00509d4 r4:df443d78
---[ end trace 1b75b31a2719ed1f ]---

So, trying to figure this out... the result is not nice.

If the spi message has is_dma_mapped = false, then we potentially map the
DMA buffers against mcspi->dev.  This struct device is the same as the
master->dev.parent.

However, when we come to complete a transfer, we unmap them against the
spi_device's struct device - in other words a different device.

That's the reason for the warning.  However, when using DMA engine, both
of these struct devices are the wrong ones to be using - the right one to
use is the one associated with the DMA engine.

However, this presents a problem with transfers with is_dma_mapped = true.
SPI device drivers appear to assume that the right struct device to use
to map for DMA is master->dev.parent.  That's fine if your SPI master
device is the struct device performing the DMA, but with DMA engine
involved, this is not true.  Not sure at the moment what to do about
that one.
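
To make the pairing rule concrete, a minimal sketch (the helper below is
invented for illustration, not mcspi code): whichever struct device a
buffer is mapped against must also be the one it is unmapped against, and
with DMA engine in the picture that wants to be the channel's device
rather than master->dev.parent or the spi_device.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int sketch_rx_mapping(struct dma_chan *chan, void *buf, size_t len)
{
	struct device *dev = chan->device->dev;	/* the device doing DMA */
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... prep, submit and wait for the dmaengine descriptor ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);	/* same dev */
	return 0;
}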

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
@ 2012-06-14 11:53         ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 11:53 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> Add DMA engine support to the OMAP SPI driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API for the transmit and receive sides.
> 
> Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> Acked-by: Grant Likely <grant.likely@secretlab.ca>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Right, now that we have working OMAP in mainline again...

------------[ cut here ]------------
WARNING: at lib/dma-debug.c:865 check_unmap+0x1a0/0x6f8()
ks8851 spi1.0: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x000000009f5c3002] [size=592 bytes]
Modules linked in:
Backtrace:
[<c0017dd0>] (dump_backtrace+0x0/0x10c) from [<c0346870>] (dump_stack+0x18/0x1c)
 r7:df79fd78 r6:c01a2200 r5:c04036bd r4:00000361
[<c0346858>] (dump_stack+0x0/0x1c) from [<c0033c48>] (warn_slowpath_common+0x58/0x70)
[<c0033bf0>] (warn_slowpath_common+0x0/0x70) from [<c0033d04>] (warn_slowpath_fmt+0x38/0x40)
 r8:df79fdf8 r7:00000000 r6:00000250 r5:00000000 r4:9f5c3002
[<c0033ccc>] (warn_slowpath_fmt+0x0/0x40) from [<c01a2200>] (check_unmap+0x1a0/0x6f8)
 r3:c040ea22 r2:c0403a0f
[<c01a2060>] (check_unmap+0x0/0x6f8) from [<c01a2978>] (debug_dma_unmap_page+0x74/0x80)
[<c01a2904>] (debug_dma_unmap_page+0x0/0x80) from [<c021bd64>] (omap2_mcspi_txrx_dma+0x33c/0x54c)
[<c021ba28>] (omap2_mcspi_txrx_dma+0x0/0x54c) from [<c021c590>] (omap2_mcspi_work+0x1b8/0x2b8)
[<c021c3d8>] (omap2_mcspi_work+0x0/0x2b8) from [<c021c974>] (omap2_mcspi_transfer_one_message+0x2e4/0x310)
[<c021c690>] (omap2_mcspi_transfer_one_message+0x0/0x310) from [<c021a9f8>] (spi_pump_messages+0x130/0x154)
[<c021a8c8>] (spi_pump_messages+0x0/0x154) from [<c00508dc>] (kthread_worker_fn+0x108/0x188)
 r7:df6a3d94 r6:df79e000 r5:df6a3d90 r4:df6a3da4
[<c00507d4>] (kthread_worker_fn+0x0/0x188) from [<c0050a60>] (kthread+0x8c/0x98)
[<c00509d4>] (kthread+0x0/0x98) from [<c0039134>] (do_exit+0x0/0x314)
 r7:00000013 r6:c0039134 r5:c00509d4 r4:df443d78
---[ end trace 1b75b31a2719ed1f ]---

So, trying to figure this out... the result is not nice.

If the spi message has is_dma_mapped = false, then we potentially map the
DMA buffers against mcspi->dev.  This struct device is the same as the
master->dev.parent.

However, when we come to complete a transfer, we unmap them against the
spi_device's struct device - in other words a different device.

That's the reason for the warning.  However, when using DMA engine, both
of these struct devices are the wrong one to be using - the right one to
use is the one associated with the DMA engine.

However, this presents a problem with transfers with is_dma_mapped = true.
SPI device drivers appear to assume that the right struct device to use
to map for DMA is master->dev.parent.  That's fine if your SPI master
device is the struct device performing the DMA, but with DMA engine
involved, this is not true.  Not sure at the moment what to do about
that one.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-14 11:53         ` Russell King - ARM Linux
@ 2012-06-14 12:08           ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 12:08 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Grant Likely, spi-devel-general

On Thu, Jun 14, 2012 at 12:53:35PM +0100, Russell King - ARM Linux wrote:
> On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> > Add DMA engine support to the OMAP SPI driver.  This supplements the
> > private DMA API implementation contained within this driver, and the
> > driver can be independently switched at build time between using DMA
> > engine and the private DMA API for the transmit and receive sides.
> > 
> > Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> > Acked-by: Grant Likely <grant.likely@secretlab.ca>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> 
> Right, now that we have working OMAP in mainline again...

Another warning:

------------[ cut here ]------------
WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
Modules linked in:
Backtrace:
[<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
[<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
[<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
 r8:c04aa4d8 r7:c04aa63c r6:de70ce00 r5:de70ce34 r4:de70ce00
[<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
[<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
 r7:de443ec8 r6:c04aa63c r5:de70ce34 r4:de70ce00
[<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
 r6:c04aa63c r5:c01ff430 r4:00000000
[<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
 r7:de6c9e80 r6:c04aa63c r5:c04aa63c r4:c0465b80
[<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
[<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
[<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
 r8:0000005b r7:c045f848 r6:00000006 r5:00000018 r4:c0465b80
[<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
[<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
[<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
[<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
---[ end trace 4dcda79f5e89dd84 ]---
ks8851 spi1.0: message enable is 0
ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM

The relevant line:

        WARN_ON(!list_empty(&dev->devres_head));

Which suggests that someone is using devres against the struct device for
the KS8851 device before the driver is bound.  That's a bug, plain and
simple.  I've not yet been able to track down what it is or where it's
being done, but it is something that has been introduced during the last
merge window.

devm_* APIs should only be used by _drivers_ against the struct device
that they are driving - because the lifetime of these things is bounded
by the point at which the driver is bound to that struct device, to the
point that it is unbound from that struct device (and at that point,
all devm_* stuff against the struct device gets destroyed.)
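
As a purely illustrative sketch (foo_probe() and bar_setup() are made-up
names, not taken from any real driver), the distinction is:

	/* hypothetical sketch - foo/bar are not real drivers */

	static int foo_probe(struct platform_device *pdev)
	{
		/* OK: pdev is the device this driver is being bound to,
		 * so the allocation is released automatically when the
		 * driver is unbound from it. */
		void *priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);

		if (!priv)
			return -ENOMEM;
		platform_set_drvdata(pdev, priv);
		return 0;
	}

	static int bar_setup(struct spi_device *spi)
	{
		/* Not OK: spi here is some other device, which may not
		 * even be bound to a driver yet, so the allocation sits
		 * outside any devres lifetime and trips the WARN_ON()
		 * above when that device is later probed. */
		void *state = devm_kzalloc(&spi->dev, 64, GFP_KERNEL);

		return state ? 0 : -ENOMEM;
	}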

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
@ 2012-06-14 12:08           ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 12:08 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 14, 2012 at 12:53:35PM +0100, Russell King - ARM Linux wrote:
> On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> > Add DMA engine support to the OMAP SPI driver.  This supplements the
> > private DMA API implementation contained within this driver, and the
> > driver can be independently switched at build time between using DMA
> > engine and the private DMA API for the transmit and receive sides.
> > 
> > Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> > Acked-by: Grant Likely <grant.likely@secretlab.ca>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> 
> Right, now that we have working OMAP in mainline again...

Another warning:

------------[ cut here ]------------
WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
Modules linked in:
Backtrace:
[<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
[<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
[<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
 r8:c04aa4d8 r7:c04aa63c r6:de70ce00 r5:de70ce34 r4:de70ce00
[<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
[<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
 r7:de443ec8 r6:c04aa63c r5:de70ce34 r4:de70ce00
[<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
 r6:c04aa63c r5:c01ff430 r4:00000000
[<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
 r7:de6c9e80 r6:c04aa63c r5:c04aa63c r4:c0465b80
[<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
[<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
[<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
 r8:0000005b r7:c045f848 r6:00000006 r5:00000018 r4:c0465b80
[<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
[<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
[<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
[<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
---[ end trace 4dcda79f5e89dd84 ]---
ks8851 spi1.0: message enable is 0
ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM

The relevant line:

        WARN_ON(!list_empty(&dev->devres_head));

Which suggests that someone is using devres against the struct device for
the KS8851 device before the driver is bound.  That's a bug, plain and
simple.  I've not yet been able to track down what it is or where it's
being done, but it is something that has been introduced during the last
merge window.

devm_* APIs should only be used by _drivers_ against the struct device
that they are driving - because the lifetime of these things is bounded
by the point at which the driver is bound to that struct device, to the
point that it is unbound from that struct device (and at that point,
all devm_* stuff against the struct device gets destroyed.)

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-14 12:08           ` Russell King - ARM Linux
@ 2012-06-14 12:50             ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 12:50 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap; +Cc: Grant Likely, spi-devel-general

On Thu, Jun 14, 2012 at 01:08:43PM +0100, Russell King - ARM Linux wrote:
> On Thu, Jun 14, 2012 at 12:53:35PM +0100, Russell King - ARM Linux wrote:
> > On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> > > Add DMA engine support to the OMAP SPI driver.  This supplements the
> > > private DMA API implementation contained within this driver, and the
> > > driver can be independently switched at build time between using DMA
> > > engine and the private DMA API for the transmit and receive sides.
> > > 
> > > Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> > > Acked-by: Grant Likely <grant.likely@secretlab.ca>
> > > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> > 
> > Right, now that we have working OMAP in mainline again...
> 
> Another warning:
> 
> ------------[ cut here ]------------
> WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
> Modules linked in:
> Backtrace:
> [<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
> [<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
> [<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
>  r8:c04aa4d8 r7:c04aa63c r6:de70ce00 r5:de70ce34 r4:de70ce00
> [<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
> [<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
>  r7:de443ec8 r6:c04aa63c r5:de70ce34 r4:de70ce00
> [<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
>  r6:c04aa63c r5:c01ff430 r4:00000000
> [<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
>  r7:de6c9e80 r6:c04aa63c r5:c04aa63c r4:c0465b80
> [<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
> [<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
> [<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
>  r8:0000005b r7:c045f848 r6:00000006 r5:00000018 r4:c0465b80
> [<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
> [<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
> [<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
> [<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
> ---[ end trace 4dcda79f5e89dd84 ]---
> ks8851 spi1.0: message enable is 0
> ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM
> 
> The relevant line:
> 
>         WARN_ON(!list_empty(&dev->devres_head));
> 
> Which suggests that someone is using devres against the struct device for
> the KS8851 device before the driver is bound.  That's a bug, plain and
> simple.  I've not yet been able to track down what it is or where it's
> being done, but it is something that has been introduced during the last
> merge window.
> 
> devm_* APIs should only be used by _drivers_ against the struct device
> that they are driving - because the lifetime of these things is bounded
> by the point at which the driver is bound to that struct device, to the
> point that it is unbound from that struct device (and at that point,
> all devm_* stuff against the struct device gets destroyed.)

This commit introduced the bug:

commit 1a77b127ae147f5827043a9896d7f4cb248b402e
Author: Shubhrajyoti D <shubhrajyoti@ti.com>
Date:   Sat Mar 17 12:44:01 2012 +0530

    OMAP : SPI : use devm_* functions

    The various devm_* functions allocate memory that is released when a driver
    detaches. This patch uses devm_request_and_ioremap
    to request memory in probe function. Since the freeing is not
    needed the calls are deleted from remove function.Also use
    use devm_kzalloc for the cs memory allocation.

    Signed-off-by: Shubhrajyoti D <shubhrajyoti@ti.com>

and sure enough, reverting this makes the warning go away.

Specifically, it is this part which is the culprit:

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7745f91..cb2c0e3 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -789,7 +789,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];

        if (!cs) {
-               cs = kzalloc(sizeof *cs, GFP_KERNEL);
+               cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -831,7 +831,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                cs = spi->controller_state;
                list_del(&cs->node);

-               kfree(spi->controller_state);
        }

        if (spi->chip_select < spi->master->num_chipselect) {

because, at the time when omap2_mcspi_setup() is called, spi->dev is
not bound, and so is outside of the devres valid lifetime of that
struct device.

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
@ 2012-06-14 12:50             ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 12:50 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Jun 14, 2012 at 01:08:43PM +0100, Russell King - ARM Linux wrote:
> On Thu, Jun 14, 2012 at 12:53:35PM +0100, Russell King - ARM Linux wrote:
> > On Thu, Jun 07, 2012 at 12:08:35PM +0100, Russell King wrote:
> > > Add DMA engine support to the OMAP SPI driver.  This supplements the
> > > private DMA API implementation contained within this driver, and the
> > > driver can be independently switched at build time between using DMA
> > > engine and the private DMA API for the transmit and receive sides.
> > > 
> > > Tested-by: Shubhrajyoti <shubhrajyoti@ti.com>
> > > Acked-by: Grant Likely <grant.likely@secretlab.ca>
> > > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> > 
> > Right, now that we have working OMAP in mainline again...
> 
> Another warning:
> 
> ------------[ cut here ]------------
> WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
> Modules linked in:
> Backtrace:
> [<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
> [<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
> [<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
>  r8:c04aa4d8 r7:c04aa63c r6:de70ce00 r5:de70ce34 r4:de70ce00
> [<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
> [<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
>  r7:de443ec8 r6:c04aa63c r5:de70ce34 r4:de70ce00
> [<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
>  r6:c04aa63c r5:c01ff430 r4:00000000
> [<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
>  r7:de6c9e80 r6:c04aa63c r5:c04aa63c r4:c0465b80
> [<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
> [<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
> [<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
>  r8:0000005b r7:c045f848 r6:00000006 r5:00000018 r4:c0465b80
> [<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
> [<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
> [<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
> [<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
> ---[ end trace 4dcda79f5e89dd84 ]---
> ks8851 spi1.0: message enable is 0
> ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM
> 
> The relevant line:
> 
>         WARN_ON(!list_empty(&dev->devres_head));
> 
> Which suggests that someone is using devres against the struct device for
> the KS8851 device before the driver is bound.  That's a bug, plain and
> simple.  I've not yet been able to track down what it is or where it's
> being done, but it is something that has been introduced during the last
> merge window.
> 
> devm_* APIs should only be used by _drivers_ against the struct device
> that they are driving - because the lifetime of these things is bounded
> by the point at which the driver is bound to that struct device, to the
> point that it is unbound from that struct device (and at that point,
> all devm_* stuff against the struct device gets destroyed.)

This commit introduced the bug:

commit 1a77b127ae147f5827043a9896d7f4cb248b402e
Author: Shubhrajyoti D <shubhrajyoti@ti.com>
Date:   Sat Mar 17 12:44:01 2012 +0530

    OMAP : SPI : use devm_* functions

    The various devm_* functions allocate memory that is released when a driver
    detaches. This patch uses devm_request_and_ioremap
    to request memory in probe function. Since the freeing is not
    needed the calls are deleted from remove function.Also use
    use devm_kzalloc for the cs memory allocation.

    Signed-off-by: Shubhrajyoti D <shubhrajyoti@ti.com>

and sure enough, reverting this makes the warning go away.

Specifically, it is this part which is the culprit:

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7745f91..cb2c0e3 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -789,7 +789,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];

        if (!cs) {
-               cs = kzalloc(sizeof *cs, GFP_KERNEL);
+               cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -831,7 +831,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                cs = spi->controller_state;
                list_del(&cs->node);

-               kfree(spi->controller_state);
        }

        if (spi->chip_select < spi->master->num_chipselect) {

because, at the time when omap2_mcspi_setup() is called, spi->dev is
not bound, and so is outside of the devres valid lifetime of that
struct device.

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion (was: Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support)
  2012-06-14 12:50             ` Russell King - ARM Linux
@ 2012-06-14 14:07               ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 14:07 UTC (permalink / raw)
  To: linux-arm-kernel, linux-omap, Grant Likely; +Cc: spi-devel-general

From: Russell King <rmk+kernel@arm.linux.org.uk>
Subject: [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion

1a77b127ae (OMAP : SPI : use devm_* functions) converted the SPI
device controller state to use devm_kzalloc().  Unfortunately, this
is used against an unbound struct device, which results in the
following when the device is eventually bound to its driver:

------------[ cut here ]------------
WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
Modules linked in:
Backtrace:
[<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
[<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
[<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
[<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
[<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
[<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
[<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
[<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
[<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
[<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
[<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
[<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
[<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
[<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
---[ end trace 4dcda79f5e89dd84 ]---
ks8851 spi1.0: message enable is 0
ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM

Fix this by partially reverting the original commit.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/spi/spi-omap2-mcspi.c |    3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 9d3409a..6263b0f 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -829,7 +829,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
 	if (!cs) {
-		cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
 			return -ENOMEM;
 		cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -869,6 +869,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 		cs = spi->controller_state;
 		list_del(&cs->node);
 
+		kfree(cs);
 	}
 
 	if (spi->chip_select < spi->master->num_chipselect) {

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion (was: Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support)
@ 2012-06-14 14:07               ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-14 14:07 UTC (permalink / raw)
  To: linux-arm-kernel

From: Russell King <rmk+kernel@arm.linux.org.uk>
Subject: [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion

1a77b127ae (OMAP : SPI : use devm_* functions) converted the SPI
device controller state to use devm_kzalloc().  Unfortunately, this
is used against an unbound struct device, which results in the
following when the device is eventually bound to its driver:

------------[ cut here ]------------
WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
Modules linked in:
Backtrace:
[<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
[<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
[<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
[<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
[<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
[<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
[<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
[<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
[<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
[<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
[<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
[<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
[<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
[<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
---[ end trace 4dcda79f5e89dd84 ]---
ks8851 spi1.0: message enable is 0
ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM

Fix this by partially reverting the original commit.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/spi/spi-omap2-mcspi.c |    3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 9d3409a..6263b0f 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -829,7 +829,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
 	if (!cs) {
-		cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
 			return -ENOMEM;
 		cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -869,6 +869,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
 		cs = spi->controller_state;
 		list_del(&cs->node);
 
+		kfree(cs);
 	}
 
 	if (spi->chip_select < spi->master->num_chipselect) {

^ permalink raw reply related	[flat|nested] 172+ messages in thread

* Re: [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion (was: Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support)
  2012-06-14 14:07               ` Russell King - ARM Linux
@ 2012-06-16 10:33                   ` Russell King - ARM Linux
  -1 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-16 10:33 UTC (permalink / raw)
  To: linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
	linux-omap-u79uwXL29TY76Z2rM5mHXA, Grant Likely
  Cc: spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

Okay, I'm going to queue this up in my tree for -rc as no one seems to
be listening to any of the emails I sent on Thursday.

On Thu, Jun 14, 2012 at 03:07:12PM +0100, Russell King - ARM Linux wrote:
> From: Russell King <rmk+kernel-lFZ/pmaqli7XmaaqVzeoHQ@public.gmane.org>
> Subject: [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion
> 
> 1a77b127ae (OMAP : SPI : use devm_* functions) converted the SPI
> device controller state to use devm_kzalloc().  Unfortunately, this
> is used against an unbound struct device, which results in the
> following when the device is eventually bound to its driver:
> 
> ------------[ cut here ]------------
> WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
> Modules linked in:
> Backtrace:
> [<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
> [<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
> [<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
> [<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
> [<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
> [<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
> [<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
> [<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
> [<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
> [<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
> [<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
> [<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
> [<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
> [<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
> ---[ end trace 4dcda79f5e89dd84 ]---
> ks8851 spi1.0: message enable is 0
> ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM
> 
> Fix this by partially reverting the original commit.
> 
> Signed-off-by: Russell King <rmk+kernel-lFZ/pmaqli7XmaaqVzeoHQ@public.gmane.org>
> ---
>  drivers/spi/spi-omap2-mcspi.c |    3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
> index 9d3409a..6263b0f 100644
> --- a/drivers/spi/spi-omap2-mcspi.c
> +++ b/drivers/spi/spi-omap2-mcspi.c
> @@ -829,7 +829,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
>  	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
>  
>  	if (!cs) {
> -		cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
> +		cs = kzalloc(sizeof *cs, GFP_KERNEL);
>  		if (!cs)
>  			return -ENOMEM;
>  		cs->base = mcspi->base + spi->chip_select * 0x14;
> @@ -869,6 +869,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
>  		cs = spi->controller_state;
>  		list_del(&cs->node);
>  
> +		kfree(cs);
>  	}
>  
>  	if (spi->chip_select < spi->master->num_chipselect) {
> 

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion (was: Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support)
@ 2012-06-16 10:33                   ` Russell King - ARM Linux
  0 siblings, 0 replies; 172+ messages in thread
From: Russell King - ARM Linux @ 2012-06-16 10:33 UTC (permalink / raw)
  To: linux-arm-kernel

Okay, I'm going to queue this up in my tree for -rc as no one seems to
be listening to any of the emails I sent on Thursday.

On Thu, Jun 14, 2012 at 03:07:12PM +0100, Russell King - ARM Linux wrote:
> From: Russell King <rmk+kernel@arm.linux.org.uk>
> Subject: [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion
> 
> 1a77b127ae (OMAP : SPI : use devm_* functions) converted the SPI
> device controller state to use devm_kzalloc().  Unfortunately, this
> is used against an unbound struct device, which results in the
> following when the device is eventually bound to its driver:
> 
> ------------[ cut here ]------------
> WARNING: at /home/rmk/git/linux-rmk/drivers/base/dd.c:257 driver_probe_device+0x78/0x21c()
> Modules linked in:
> Backtrace:
> [<c0017d0c>] (dump_backtrace+0x0/0x10c) from [<c033e208>] (dump_stack+0x18/0x1c) r7:00000000 r6:c01ff28c r5:c040050c r4:00000101
> [<c033e1f0>] (dump_stack+0x0/0x1c) from [<c00337ec>] (warn_slowpath_common+0x58/0x70)
> [<c0033794>] (warn_slowpath_common+0x0/0x70) from [<c0033828>] (warn_slowpath_null+0x24/0x2c)
> [<c0033804>] (warn_slowpath_null+0x0/0x2c) from [<c01ff28c>] (driver_probe_device+0x78/0x21c)
> [<c01ff214>] (driver_probe_device+0x0/0x21c) from [<c01ff49c>] (__driver_attach+0x6c/0x90)
> [<c01ff430>] (__driver_attach+0x0/0x90) from [<c01fda70>] (bus_for_each_dev+0x58/0x98)
> [<c01fda18>] (bus_for_each_dev+0x0/0x98) from [<c01ff0f4>] (driver_attach+0x20/0x28)
> [<c01ff0d4>] (driver_attach+0x0/0x28) from [<c01fe2f4>] (bus_add_driver+0xb4/0x230)
> [<c01fe240>] (bus_add_driver+0x0/0x230) from [<c01ffb24>] (driver_register+0xac/0x138)
> [<c01ffa78>] (driver_register+0x0/0x138) from [<c0215d4c>] (spi_register_driver+0x4c/0x60)
> [<c0215d00>] (spi_register_driver+0x0/0x60) from [<c045414c>] (ks8851_init+0x14/0x1c)
> [<c0454138>] (ks8851_init+0x0/0x1c) from [<c0008770>] (do_one_initcall+0x9c/0x164)
> [<c00086d4>] (do_one_initcall+0x0/0x164) from [<c0436410>] (kernel_init+0x128/0x210)
> [<c04362e8>] (kernel_init+0x0/0x210) from [<c0038754>] (do_exit+0x0/0x72c)
> ---[ end trace 4dcda79f5e89dd84 ]---
> ks8851 spi1.0: message enable is 0
> ks8851 spi1.0: eth0: revision 0, MAC 08:00:28:01:4d:c6, IRQ 194, has EEPROM
> 
> Fix this by partially reverting the original commit.
> 
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> ---
>  drivers/spi/spi-omap2-mcspi.c |    3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
> index 9d3409a..6263b0f 100644
> --- a/drivers/spi/spi-omap2-mcspi.c
> +++ b/drivers/spi/spi-omap2-mcspi.c
> @@ -829,7 +829,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
>  	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
>  
>  	if (!cs) {
> -		cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
> +		cs = kzalloc(sizeof *cs, GFP_KERNEL);
>  		if (!cs)
>  			return -ENOMEM;
>  		cs->base = mcspi->base + spi->chip_select * 0x14;
> @@ -869,6 +869,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
>  		cs = spi->controller_state;
>  		list_del(&cs->node);
>  
> +		kfree(cs);
>  	}
>  
>  	if (spi->chip_select < spi->master->num_chipselect) {
> 

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support
  2012-06-14 12:50             ` Russell King - ARM Linux
@ 2012-06-18  6:41               ` Shubhrajyoti
  -1 siblings, 0 replies; 172+ messages in thread
From: Shubhrajyoti @ 2012-06-18  6:41 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: linux-arm-kernel, linux-omap, Grant Likely, spi-devel-general

On Thursday 14 June 2012 06:20 PM, Russell King - ARM Linux wrote:
> because, at the time when omap2_mcspi_setup() is called, spi->dev is
> not bound, and so is outside of the devres valid lifetime of that
> struct device.
Agreed.
Apologies for breaking this in the initial commit.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* [CFT 07/11] spi: omap2-mcspi: add DMA engine support
@ 2012-06-18  6:41               ` Shubhrajyoti
  0 siblings, 0 replies; 172+ messages in thread
From: Shubhrajyoti @ 2012-06-18  6:41 UTC (permalink / raw)
  To: linux-arm-kernel

On Thursday 14 June 2012 06:20 PM, Russell King - ARM Linux wrote:
> because, at the time when omap2_mcspi_setup() is called, spi->dev is
> not bound, and so is outside of the devres valid lifetime of that
> struct device.
Agreed.
Apologies for breaking this in the initial commit.

^ permalink raw reply	[flat|nested] 172+ messages in thread

* Re: [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
  2012-06-07 11:07       ` Russell King
@ 2012-07-10 21:48         ` Kevin Hilman
  -1 siblings, 0 replies; 172+ messages in thread
From: Kevin Hilman @ 2012-07-10 21:48 UTC (permalink / raw)
  To: Russell King; +Cc: linux-arm-kernel, linux-omap, Chris Ball, linux-mmc

Hi Russell,

Russell King <rmk+kernel@arm.linux.org.uk> writes:

> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

While testing this, I noticed a minor problem in the case of probe
failure (e.g. if dmaengine is not built into the kernel).

The current driver suffers from this same problem, but it should probably
be fixed as part of the conversion to dmaengine...

[...]

> @@ -2048,36 +1919,28 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
>  		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
>  		goto err_irq;
>  	}
> -	host->dma_line_tx = res->start;
> +	tx_req = res->start;
>  
>  	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
>  	if (!res) {
>  		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
>  		goto err_irq;
>  	}
> -	host->dma_line_rx = res->start;
> +	rx_req = res->start;
>  
> -	{
> -		dma_cap_mask_t mask;
> -		unsigned sig;
> -		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
> -
> -		dma_cap_zero(mask);
> -		dma_cap_set(DMA_SLAVE, mask);
> -#if 1
> -		sig = host->dma_line_rx;
> -		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
> -		if (!host->rx_chan) {
> -			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
> -		}
> -#endif
> -#if 1
> -		sig = host->dma_line_tx;
> -		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
> -		if (!host->tx_chan) {
> -			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
> -		}
> -#endif
> +	dma_cap_zero(mask);
> +	dma_cap_set(DMA_SLAVE, mask);
> +
> +	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
> +	if (!host->rx_chan) {
> +		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
> +		goto err_irq;
> +	}
> +
> +	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
> +	if (!host->tx_chan) {
> +		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
> +		goto err_irq;
>  	}

If either of these fails, ret is still zero, so even though this results
in a failed probe, the zero return value means the driver still gets
bound to the device.

The patch below fixes this and applies on your 'for-next' branch.  Or,
feel free to fold this into the original if you prefer.

Kevin

From af7537997b46ee3991985fecd4b4a302bdc0df31 Mon Sep 17 00:00:00 2001
From: Kevin Hilman <khilman@ti.com>
Date: Tue, 10 Jul 2012 14:30:18 -0700
Subject: [PATCH] mmc: omap_hsmmc: ensure probe returns error if DMA channel
 request fails

If dma_request_channel() fails (e.g. because DMA engine is not built
into the kernel), the return value from probe is zero causing the
driver to be bound to the device even though probe failed.

To fix, ensure that probe returns an error value when a DMA channel
request fails.

Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Kevin Hilman <khilman@ti.com>
---
 drivers/mmc/host/omap_hsmmc.c |    2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2338703..ddcecf8 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1924,12 +1924,14 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
 	if (!host->rx_chan) {
 		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		ret = -ENXIO;
 		goto err_irq;
 	}
 
 	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
 	if (!host->tx_chan) {
 		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		ret = -ENXIO;
 		goto err_irq;
 	}
 
-- 
1.7.9.2


^ permalink raw reply related	[flat|nested] 172+ messages in thread

* [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation
@ 2012-07-10 21:48         ` Kevin Hilman
  0 siblings, 0 replies; 172+ messages in thread
From: Kevin Hilman @ 2012-07-10 21:48 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Russell,

Russell King <rmk+kernel@arm.linux.org.uk> writes:

> Remove the private DMA API implementation from omap_hsmmc, making it
> use entirely the DMA engine API.
>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

While testing this, I noticed a minor problem in the case of probe
failure (e.g. if dmaengine is not built into the kernel).

The current driver suffers from this same problem, but it should probably
be fixed as part of the conversion to dmaengine...

[...]

> @@ -2048,36 +1919,28 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
>  		dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
>  		goto err_irq;
>  	}
> -	host->dma_line_tx = res->start;
> +	tx_req = res->start;
>  
>  	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
>  	if (!res) {
>  		dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
>  		goto err_irq;
>  	}
> -	host->dma_line_rx = res->start;
> +	rx_req = res->start;
>  
> -	{
> -		dma_cap_mask_t mask;
> -		unsigned sig;
> -		extern bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
> -
> -		dma_cap_zero(mask);
> -		dma_cap_set(DMA_SLAVE, mask);
> -#if 1
> -		sig = host->dma_line_rx;
> -		host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
> -		if (!host->rx_chan) {
> -			dev_warn(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", sig);
> -		}
> -#endif
> -#if 1
> -		sig = host->dma_line_tx;
> -		host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
> -		if (!host->tx_chan) {
> -			dev_warn(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", sig);
> -		}
> -#endif
> +	dma_cap_zero(mask);
> +	dma_cap_set(DMA_SLAVE, mask);
> +
> +	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
> +	if (!host->rx_chan) {
> +		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
> +		goto err_irq;
> +	}
> +
> +	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
> +	if (!host->tx_chan) {
> +		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
> +		goto err_irq;
>  	}

If either of these fails, ret is still zero, so even though this results
in a failed probe, the zero return value means the driver still gets
bound to the device.

The patch below fixes this and applies on your 'for-next' branch.  Or,
feel free to fold this into the original if you prefer.

Kevin

From af7537997b46ee3991985fecd4b4a302bdc0df31 Mon Sep 17 00:00:00 2001
From: Kevin Hilman <khilman@ti.com>
Date: Tue, 10 Jul 2012 14:30:18 -0700
Subject: [PATCH] mmc: omap_hsmmc: ensure probe returns error if DMA channel
 request fails

If dma_request_channel() fails (e.g. because DMA engine is not built
into the kernel), the return value from probe is zero causing the
driver to be bound to the device even though probe failed.

To fix, ensure that probe returns an error value when a DMA channel
request fails.

Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Kevin Hilman <khilman@ti.com>
---
 drivers/mmc/host/omap_hsmmc.c |    2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2338703..ddcecf8 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1924,12 +1924,14 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 	host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
 	if (!host->rx_chan) {
 		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+		ret = -ENXIO;
 		goto err_irq;
 	}
 
 	host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
 	if (!host->tx_chan) {
 		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+		ret = -ENXIO;
 		goto err_irq;
 	}
 
-- 
1.7.9.2

^ permalink raw reply related	[flat|nested] 172+ messages in thread

end of thread, other threads:[~2012-07-10 21:48 UTC | newest]

Thread overview: 172+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-06-07 10:34 [CFT] DMA engine patches Russell King - ARM Linux
2012-06-07 10:34 ` Russell King - ARM Linux
2012-06-07 10:40 ` [CFT 1/3] dmaengine: split out virtual channel DMA support from sa11x0 driver Russell King
2012-06-07 10:40   ` Russell King
2012-06-07 10:41 ` [CFT 2/3] dmaengine: virt-dma: vchan_find_desc() Russell King
2012-06-07 10:41   ` Russell King
2012-06-07 10:41 ` [CFT 3/3] dmaengine: virt-dma: add support for cyclic DMA periodic callbacks Russell King
2012-06-07 10:41   ` Russell King
2012-06-07 10:42   ` [CFT] SA11x0 patches Russell King - ARM Linux
2012-06-07 10:42     ` Russell King - ARM Linux
2012-06-07 10:43     ` [CFT 1/2] dmaengine: sa11x0-dma: fix DMA residue support Russell King
2012-06-07 10:43       ` Russell King
2012-06-07 10:43     ` [CFT 2/2] dmaengine: sa11x0-dma: add cyclic DMA support Russell King
2012-06-07 10:43       ` Russell King
2012-06-07 10:45   ` [CFT] PL08x patches Russell King - ARM Linux
2012-06-07 10:45     ` Russell King - ARM Linux
2012-06-07 10:46     ` [CFT 01/31] dmaengine: PL08x: remove runtime PM support Russell King
2012-06-07 10:46       ` Russell King
2012-06-07 10:46     ` [CFT 02/31] dmaengine: PL08x: fix missed dma_transfer_direction fixup Russell King
2012-06-07 10:46       ` Russell King
2012-06-07 10:46     ` [CFT 03/31] dmaengine: PL08x: remove redundant spinlock Russell King
2012-06-07 10:46       ` Russell King
2012-06-07 10:47     ` [CFT 04/31] dmaengine: PL08x: remove circular_buffer boolean from channel data Russell King
2012-06-07 10:47       ` Russell King
2012-06-07 10:47     ` [CFT 05/31] dmaengine: PL08x: clean up get_signal/put_signal Russell King
2012-06-07 10:47       ` Russell King
2012-06-10 10:03       ` Russell King - ARM Linux
2012-06-10 10:03         ` Russell King - ARM Linux
2012-06-07 10:47     ` [CFT 06/31] dmaengine: PL08x: move private data structures into amba-pl08x.c Russell King
2012-06-07 10:47       ` Russell King
2012-06-07 10:48     ` [CFT 07/31] dmaengine: PL08x: constify channel names and bus_id strings Russell King
2012-06-07 10:48       ` Russell King
2012-06-07 10:48     ` [CFT 08/31] dmaengine: PL08x: get src/dst addr direct from dma_slave_config struct Russell King
2012-06-07 10:48       ` Russell King
2012-06-07 10:48     ` [CFT 09/31] dmaengine: PL08x: get rid of device_fc in struct pl08x_dma_chan Russell King
2012-06-07 10:48       ` Russell King
2012-06-07 10:49     ` [CFT 10/31] dmaengine: PL08x: move the bus and increment selection to dma prepare function Russell King
2012-06-07 10:49       ` Russell King
2012-06-07 10:49     ` [CFT 11/31] dmaengine: PL08x: extract function to to generate cctl values Russell King
2012-06-07 10:49       ` Russell King
2012-06-07 10:49     ` [CFT 12/31] dmaengine: PL08x: ignore 'direction' argument in dma_slave_config Russell King
2012-06-07 10:49       ` Russell King
2012-06-07 10:50     ` [CFT 13/31] dmaengine: PL08x: get rid of unnecessary checks " Russell King
2012-06-07 10:50       ` Russell King
2012-06-07 10:50     ` [CFT 14/31] dmaengine: PL08x: split DMA signal muxing from channel alloc Russell King
2012-06-07 10:50       ` Russell King
2012-06-07 10:50     ` [CFT 15/31] dmaengine: PL08x: move DMA signal muxing into pl08x_dma_chan struct Russell King
2012-06-07 10:50       ` Russell King
2012-06-07 10:51     ` [CFT 16/31] dmaengine: PL08x: track mux usage on a per-channel basis Russell King
2012-06-07 10:51       ` Russell King
2012-06-07 10:51     ` [CFT 17/31] dmaengine: PL08x: convert to a list of completed descriptors Russell King
2012-06-07 10:51       ` Russell King
2012-06-07 10:51     ` [CFT 18/31] dmaengine: PL08x: move DMA signal muxing into slave prepare code Russell King
2012-06-07 10:51       ` Russell King
2012-06-07 10:52     ` [CFT 19/31] dmaengine: PL08x: remove waiting descriptor pointer Russell King
2012-06-07 10:52       ` Russell King
2012-06-07 10:52     ` [CFT 20/31] dmaengine: PL08x: re-jig the starting of txds Russell King
2012-06-07 10:52       ` Russell King
2012-06-07 10:52     ` [CFT 21/31] dmaengine: PL08x: split the pend_list in two Russell King
2012-06-07 10:52       ` Russell King
2012-06-07 10:53     ` [CFT 22/31] dmaengine: PL08x: start next descriptor from irq context Russell King
2012-06-07 10:53       ` Russell King
2012-06-07 10:53     ` [CFT 23/31] dmaengine: PL08x: rejig physical channel allocation Russell King
2012-06-07 10:53       ` Russell King
2012-06-07 10:53     ` [CFT 24/31] dmaengine: PL08x: convert to use virt-dma structs Russell King
2012-06-07 10:53       ` Russell King
2012-06-07 10:54     ` [CFT 25/31] dmaengine: PL08x: use vchan's spinlock Russell King
2012-06-07 10:54       ` Russell King
2012-06-07 10:54     ` [CFT 26/31] dmaengine: PL08x: convert to use vchan submitted/issued lists Russell King
2012-06-07 10:54       ` Russell King
2012-06-07 10:54     ` [CFT 27/31] dmaengine: PL08x: convert to use vchan done list Russell King
2012-06-07 10:54       ` Russell King
2012-06-07 10:55     ` [CFT 28/31] dmaengine: PL08x: fix tx_status function to return correct residue Russell King
2012-06-07 10:55       ` Russell King
2012-06-07 10:55     ` [CFT 29/31] dmaengine: PL08x: get rid of pl08x_prep_channel_resources Russell King
2012-06-07 10:55       ` Russell King
2012-06-07 10:55     ` [CFT 30/31] dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking Russell King
2012-06-07 10:55       ` Russell King
2012-06-07 10:56     ` [CFT 31/31] dmaengine: PL08x: ensure all descriptors are freed when channel is released Russell King
2012-06-07 10:56       ` Russell King
2012-06-08  8:32     ` [CFT] PL08x patches Linus Walleij
2012-06-08  8:32       ` Linus Walleij
2012-06-07 11:06   ` [CFT] OMAP patches Russell King - ARM Linux
2012-06-07 11:06     ` Russell King - ARM Linux
2012-06-07 11:06     ` [CFT 01/11] dmaengine: add OMAP DMA engine driver Russell King
2012-06-07 11:06       ` Russell King
2012-06-07 12:40       ` S, Venkatraman
2012-06-07 12:40         ` S, Venkatraman
2012-06-07 12:45         ` S, Venkatraman
2012-06-07 12:45           ` S, Venkatraman
2012-06-08  6:19       ` Shilimkar, Santosh
2012-06-08  6:19         ` Shilimkar, Santosh
2012-06-08  9:02       ` Russell King - ARM Linux
2012-06-08  9:02         ` Russell King - ARM Linux
2012-06-08 10:00         ` Shilimkar, Santosh
2012-06-08 10:00           ` Shilimkar, Santosh
2012-06-08 10:01           ` Russell King - ARM Linux
2012-06-08 10:01             ` Russell King - ARM Linux
2012-06-07 11:06     ` [CFT 02/11] mmc: omap_hsmmc: add DMA engine support Russell King
2012-06-07 11:06       ` Russell King
2012-06-07 17:04       ` Tony Lindgren
2012-06-07 17:04         ` Tony Lindgren
2012-06-08  8:53       ` Linus Walleij
2012-06-08  8:53         ` Linus Walleij
2012-06-07 11:07     ` [CFT 03/11] mmc: omap_hsmmc: remove private DMA API implementation Russell King
2012-06-07 11:07       ` Russell King
2012-06-07 17:04       ` Tony Lindgren
2012-06-07 17:04         ` Tony Lindgren
2012-06-07 17:53       ` S, Venkatraman
2012-06-07 17:53         ` S, Venkatraman
2012-07-10 21:48       ` Kevin Hilman
2012-07-10 21:48         ` Kevin Hilman
2012-06-07 11:07     ` [CFT 04/11] mmc: omap: add DMA engine support Russell King
2012-06-07 11:07       ` Russell King
2012-06-07 17:05       ` Tony Lindgren
2012-06-07 17:05         ` Tony Lindgren
2012-06-08  8:52       ` Linus Walleij
2012-06-08  8:52         ` Linus Walleij
2012-06-07 11:07     ` [CFT 05/11] mmc: omap: remove private DMA API implementation Russell King
2012-06-07 11:07       ` Russell King
2012-06-07 17:05       ` Tony Lindgren
2012-06-07 17:05         ` Tony Lindgren
2012-06-07 11:08     ` [CFT 06/11] ARM: omap: remove mmc platform data dma_mask and initialization Russell King
2012-06-07 11:08       ` Russell King
2012-06-07 17:06       ` Tony Lindgren
2012-06-07 17:06         ` Tony Lindgren
2012-06-07 11:08     ` [CFT 07/11] spi: omap2-mcspi: add DMA engine support Russell King
2012-06-07 11:08       ` Russell King
2012-06-08  8:50       ` Linus Walleij
2012-06-08  8:50         ` Linus Walleij
2012-06-14 11:53       ` Russell King - ARM Linux
2012-06-14 11:53         ` Russell King - ARM Linux
2012-06-14 12:08         ` Russell King - ARM Linux
2012-06-14 12:08           ` Russell King - ARM Linux
2012-06-14 12:50           ` Russell King - ARM Linux
2012-06-14 12:50             ` Russell King - ARM Linux
2012-06-14 14:07             ` [PATCH] SPI: OMAP: fix over-eager devm_xxx() conversion (was: Re: [CFT 07/11] spi: omap2-mcspi: add DMA engine support) Russell King - ARM Linux
2012-06-14 14:07               ` Russell King - ARM Linux
     [not found]               ` <20120614140712.GH31187-l+eeeJia6m9vn6HldHNs0ANdhmdF6hFW@public.gmane.org>
2012-06-16 10:33                 ` Russell King - ARM Linux
2012-06-16 10:33                   ` Russell King - ARM Linux
2012-06-18  6:41             ` [CFT 07/11] spi: omap2-mcspi: add DMA engine support Shubhrajyoti
2012-06-18  6:41               ` Shubhrajyoti
2012-06-07 11:08     ` [CFT 08/11] spi: omap2-mcspi: remove private DMA API implementation Russell King
2012-06-07 11:08       ` Russell King
2012-06-07 11:09     ` [CFT 09/11] mtd: omap2: add DMA engine support Russell King
2012-06-07 11:09       ` Russell King
2012-06-07 11:09       ` Russell King
2012-06-07 12:49       ` Artem Bityutskiy
2012-06-07 12:49         ` Artem Bityutskiy
2012-06-07 12:49         ` Artem Bityutskiy
2012-06-07 13:11         ` Russell King - ARM Linux
2012-06-07 13:11           ` Russell King - ARM Linux
2012-06-07 13:11           ` Russell King - ARM Linux
2012-06-07 13:28           ` Artem Bityutskiy
2012-06-07 13:28             ` Artem Bityutskiy
2012-06-07 13:28             ` Artem Bityutskiy
2012-06-07 17:10             ` Tony Lindgren
2012-06-07 17:10               ` Tony Lindgren
2012-06-07 17:10               ` Tony Lindgren
2012-06-07 11:09     ` [CFT 10/11] mtd: omap2: remove private DMA API implementation Russell King
2012-06-07 11:09       ` Russell King
2012-06-07 11:09       ` Russell King
2012-06-07 11:09     ` [CFT 11/11] Add feature removal of old OMAP private DMA implementation Russell King
2012-06-07 11:09       ` Russell King
2012-06-07 17:07       ` Tony Lindgren
2012-06-07 17:07         ` Tony Lindgren
2012-06-08  6:10       ` Shilimkar, Santosh
2012-06-08  6:10         ` Shilimkar, Santosh
2012-06-08 18:37       ` Rob Landley
2012-06-08 18:37         ` Rob Landley
2012-06-09  8:32         ` Russell King - ARM Linux
2012-06-09  8:32           ` Russell King - ARM Linux
