linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/3 v2] sh: fix dma driver
@ 2009-12-10 17:35 Guennadi Liakhovetski
  2009-12-10 17:35 ` [PATCH 1/3 v2] sh: stylistic improvements for the DMA driver Guennadi Liakhovetski
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-10 17:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: Dan Williams, linux-sh

Further testing with preempt enabled revealed more problems with 
insufficient locking and improper descriptor state management. Therefore 
I'm resubmitting the

sh: fix DMA driver's descriptor chaining and cookie assignment

patch. At the same time I swapped its order with the stylistic changes,
so that the complex stuff comes at the end. I also added a minor patch
fixing a comment in dmaengine.h. The patches

sh: DMA driver has to specify its alignment requirements
dmaengine: fix dmatest to verify minimum transfer length and test buffer size
sh: dmaengine support for sh7724

are unchanged, so they are not resubmitted.
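
To make the descriptor state management easier to follow: patch 2/3 below
replaces the old DESC_COMP / DESC_NCOMP markers with an explicit
per-descriptor state machine. The enum is quoted from the patch itself;
the annotations on the first three states are only my summary of where
each descriptor lives:

enum sh_dmae_desc_status {
	DESC_IDLE,	/* free, on ld_free, ready for prep_memcpy() */
	DESC_PREPARED,	/* prepared, parked on ld_free until tx_submit() */
	DESC_SUBMITTED,	/* moved to ld_queue, waiting for the hardware */
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};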

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/


* [PATCH 1/3 v2] sh: stylistic improvements for the DMA driver
  2009-12-10 17:35 [PATCH 0/3 v2] sh: fix dma driver Guennadi Liakhovetski
@ 2009-12-10 17:35 ` Guennadi Liakhovetski
  2009-12-10 17:35 ` [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and cookie assignment Guennadi Liakhovetski
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-10 17:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: Dan Williams, linux-sh

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/dma/shdma.c |   34 +++++++++++++++++-----------------
 drivers/dma/shdma.h |   14 +++++++-------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index f5fae12..8d9acd7 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
 	unsigned short dmaor;
 
 	sh_dmae_ctl_stop(id);
-	dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+	dmaor = dmaor_read_reg(id) | DMAOR_INIT;
 
 	dmaor_write_reg(id, dmaor);
-	if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
 		pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
 	return 0;
 }
 
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 	if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
 {
 	sh_dmae_writel(sh_chan, hw.sar, SAR);
 	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan,
-		(hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
-	chcr |= (CHCR_DE|CHCR_IE);
+	chcr |= CHCR_DE | CHCR_IE;
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_idle(sh_chan);
+	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
 	if (ret)
 		return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_idle(sh_chan);
+	int ret = dmae_is_busy(sh_chan);
 	if (ret)
 		return ret;
 
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
-					"No free memory for link descriptor\n");
+				"No free memory for link descriptor\n");
 			goto err_get_desc;
 		}
 
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 	struct sh_dmae_regs hw;
 
 	/* DMA work check */
-	if (dmae_is_idle(sh_chan))
+	if (dmae_is_busy(sh_chan))
 		return;
 
 	/* Find the first un-transfer desciptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc, *_desc, *cur_desc = NULL;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+
 	list_for_each_entry_safe(desc, _desc,
-					&sh_chan->ld_queue, node) {
+				 &sh_chan->ld_queue, node) {
 		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
 			cur_desc = desc;
 			break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 	/* alloc channel */
 	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
 	if (!new_sh_chan) {
-		dev_err(shdev->common.dev, "No free memory for allocating "
-				"dma channels!\n");
+		dev_err(shdev->common.dev,
+			"No free memory for allocating dma channels!\n");
 		return -ENOMEM;
 	}
 
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 			"sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
-	err = request_irq(irq, &sh_dmae_interrupt,
-		irqflags, new_sh_chan->dev_id, new_sh_chan);
+	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+			  new_sh_chan->dev_id, new_sh_chan);
 	if (err) {
 		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
 			"with return %d\n", id, err);
@@ -691,8 +691,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	}
 
 	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
-		err = request_irq(eirq[ecnt], sh_dmae_err,
-			irqflags, "DMAC Address Error", shdev);
+		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+				  "DMAC Address Error", shdev);
 		if (err) {
 			dev_err(&pdev->dev, "DMA device request_irq"
 				"error (irq %d) with return %d\n",
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b4bc15..60b81e5 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -35,15 +35,15 @@ struct sh_desc {
 
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
-	spinlock_t desc_lock;			/* Descriptor operation lock */
-	struct list_head ld_queue;		/* Link descriptors queue */
-	struct list_head ld_free;		/* Link descriptors free */
-	struct dma_chan common;			/* DMA common channel */
-	struct device *dev;				/* Channel device */
+	spinlock_t desc_lock;		/* Descriptor operation lock */
+	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_free;	/* Link descriptors free */
+	struct dma_chan common;		/* DMA common channel */
+	struct device *dev;		/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
-	int descs_allocated;			/* desc count */
+	int descs_allocated;		/* desc count */
 	int id;				/* Raw id of this channel */
-	char dev_id[16];	/* unique name per DMAC of channel */
+	char dev_id[16];		/* unique name per DMAC of channel */
 
 	/* Set chcr */
 	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
-- 
1.6.2.4



* [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and cookie assignment
  2009-12-10 17:35 [PATCH 0/3 v2] sh: fix dma driver Guennadi Liakhovetski
  2009-12-10 17:35 ` [PATCH 1/3 v2] sh: stylistic improvements for the DMA driver Guennadi Liakhovetski
@ 2009-12-10 17:35 ` Guennadi Liakhovetski
  2009-12-11  8:02   ` Paul Mundt
  2009-12-10 17:35 ` [PATCH 3/3] dmaengine: clarify the meaning of the DMA_CTRL_ACK flag Guennadi Liakhovetski
  2009-12-12  4:58 ` [PATCH 0/3 v2] sh: fix dma driver Dan Williams
  3 siblings, 1 reply; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-10 17:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: Dan Williams, linux-sh

The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
and its chaining of partial descriptors is broken. The latter problem is
usually invisible, because the maximum transfer size per chunk is 16M, but
if you artificially set this limit lower, the driver fails. Since cookies
are also used in chunk management, both problems are fixed in one patch. As
side effects, a possible memory leak (descriptors prepared but never
submitted) and multiple races have also been fixed.
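
For context: dmaengine cookies are small positive integers handed out
monotonically at submit time, while negative values are reserved for error
codes; that is what makes leaking -EBUSY / -EINVAL cookies to clients
fatal. Below is a minimal sketch of the chunking arithmetic and cookie
conventions the fixed driver follows; the constant and the conventions are
taken from the patch, the helper function itself is only illustrative:

#include <stddef.h>

#define SH_DMA_TCR_MAX 0x00FFFFFF	/* from drivers/dma/shdma.h: 16MB - 1 */

/*
 * Chunks needed for a transfer of len bytes, each chunk moving at most
 * SH_DMA_TCR_MAX + 1 bytes: the ceil(len / (SH_DMA_TCR_MAX + 1)) that
 * sh_dmae_prep_memcpy() computes up front, e.g. 16M -> 1, 16M + 1 -> 2.
 */
static inline int sh_dmae_chunks(size_t len)
{
	return (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
}

/*
 * Cookie conventions for the resulting chain (from the patch's comments):
 *   first chunk:   cookie == -EBUSY until tx_submit(), then positive
 *   middle chunks: cookie == -EINVAL, invisible to the user
 *   last chunk:    cookie == -ENOSPC, if it is not also the first
 */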

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/dma/shdma.c |  324 ++++++++++++++++++++++++++++++++------------------
 drivers/dma/shdma.h |    9 +-
 2 files changed, 213 insertions(+), 120 deletions(-)

diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 8d9acd7..9546f5f 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/platform_device.h>
 #include <cpu/dma.h>
 #include <asm/dma-sh.h>
 #include "shdma.h"
 
 /* DMA descriptor control */
-#define DESC_LAST	(-1)
-#define DESC_COMP	(1)
-#define DESC_NCOMP	(0)
+enum sh_dmae_desc_status {
+	DESC_IDLE,
+	DESC_PREPARED,
+	DESC_SUBMITTED,
+	DESC_COMPLETED,	/* completed, have to call callback */
+	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
+};
 
 #define NR_DESCS_PER_CHANNEL 32
 /*
@@ -45,6 +48,8 @@
  */
 #define RS_DEFAULT  (RS_DUAL)
 
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
 	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
 }
 
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
-	sh_dmae_writel(sh_chan, hw.sar, SAR);
-	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->sar, SAR);
+	sh_dmae_writel(sh_chan, hw->dar, DAR);
+	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct sh_desc *desc = tx_to_sh_desc(tx);
+	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (cookie < 0)
 		cookie = 1;
 
-	/* If desc only in the case of 1 */
-	if (desc->async_tx.cookie != -EBUSY)
-		desc->async_tx.cookie = cookie;
-	sh_chan->common.cookie = desc->async_tx.cookie;
+	sh_chan->common.cookie = cookie;
+	tx->cookie = cookie;
+
+	/* Mark all chunks of this descriptor as submitted, move to the queue */
+	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+		/*
+		 * All chunks are on the global ld_free, so, we have to find
+		 * the end of the chain ourselves
+		 */
+		if (chunk != desc && (chunk->mark == DESC_IDLE ||
+				      chunk->async_tx.cookie > 0 ||
+				      chunk->async_tx.cookie == -EBUSY ||
+				      &chunk->node == &sh_chan->ld_free))
+			break;
+		chunk->mark = DESC_SUBMITTED;
+		/* Callback goes to the last chunk */
+		chunk->async_tx.callback = NULL;
+		chunk->cookie = cookie;
+		list_move_tail(&chunk->node, &sh_chan->ld_queue);
+		last = chunk;
+	}
+
+	last->async_tx.callback = callback;
+	last->async_tx.callback_param = tx->callback_param;
 
-	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+		tx->cookie, &last->async_tx, sh_chan->id,
+		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return cookie;
 }
 
+/* Called with desc_lock held */
 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *desc, *_desc, *ret = NULL;
+	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
-		if (async_tx_test_ack(&desc->async_tx)) {
+	list_for_each_entry(desc, &sh_chan->ld_free, node)
+		if (desc->mark != DESC_PREPARED) {
+			BUG_ON(desc->mark != DESC_IDLE);
 			list_del(&desc->node);
-			ret = desc;
-			break;
+			return desc;
 		}
-	}
-	spin_unlock_bh(&sh_chan->desc_lock);
-
-	return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
-	if (desc) {
-		spin_lock_bh(&sh_chan->desc_lock);
-
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
-		list_add(&desc->node, &sh_chan->ld_free);
 
-		spin_unlock_bh(&sh_chan->desc_lock);
-	}
+	return NULL;
 }
 
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
-		desc->async_tx.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc->tx_list);
-		sh_dmae_put_desc(sh_chan, desc);
+		desc->mark = DESC_IDLE;
 
 		spin_lock_bh(&sh_chan->desc_lock);
+		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	BUG_ON(!list_empty(&sh_chan->ld_queue));
+	/* Prepared and not submitted descriptors can still be on the queue */
+	if (!list_empty(&sh_chan->ld_queue))
+		sh_dmae_chan_ld_cleanup(sh_chan, true);
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	struct sh_dmae_chan *sh_chan;
 	struct sh_desc *first = NULL, *prev = NULL, *new;
 	size_t copy_size;
+	LIST_HEAD(tx_list);
+	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
 	if (!chan)
 		return NULL;
@@ -301,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 
 	sh_chan = to_sh_chan(chan);
 
+	/* Have to lock the whole loop to protect against concurrent release */
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	/*
+	 * Chaining:
+	 * first descriptor is what user is dealing with in all API calls, its
+	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
+	 *	number
+	 * if more than one chunk is needed further chunks have cookie = -EINVAL
+	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
+	 * all chunks are linked onto the tx_list head with their .node heads
+	 *	only during this function, then they are immediately spliced
+	 *	back onto the free list in form of a chain
+	 */
 	do {
-		/* Allocate the link descriptor from DMA pool */
+		/* Allocate the link descriptor from the free list */
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
 				"No free memory for link descriptor\n");
-			goto err_get_desc;
+			list_for_each_entry(new, &tx_list, node)
+				new->mark = DESC_IDLE;
+			list_splice(&tx_list, &sh_chan->ld_free);
+			spin_unlock_bh(&sh_chan->desc_lock);
+			return NULL;
 		}
 
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
 
 		new->hw.sar = dma_src;
 		new->hw.dar = dma_dest;
 		new->hw.tcr = copy_size;
-		if (!first)
+		if (!first) {
+			/* First desc */
+			new->async_tx.cookie = -EBUSY;
 			first = new;
+		} else {
+			/* Other desc - invisible to the user */
+			new->async_tx.cookie = -EINVAL;
+		}
 
-		new->mark = DESC_NCOMP;
-		async_tx_ack(&new->async_tx);
+		dev_dbg(sh_chan->dev,
+			"chaining %u of %u with %p, dst %x, cookie %d\n",
+			copy_size, len, &new->async_tx, dma_dest,
+			new->async_tx.cookie);
+
+		new->mark = DESC_PREPARED;
+		new->async_tx.flags = flags;
+		new->chunks = chunks--;
 
 		prev = new;
 		len -= copy_size;
 		dma_src += copy_size;
 		dma_dest += copy_size;
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
+		list_add_tail(&new->node, &tx_list);
 	} while (len);
 
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY; /* Last desc */
+	if (new != first)
+		new->async_tx.cookie = -ENOSPC;
 
-	return &first->async_tx;
+	/* Put them back on the free list, so, they don't get lost */
+	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-err_get_desc:
-	sh_dmae_put_desc(sh_chan, first);
-	return NULL;
+	spin_unlock_bh(&sh_chan->desc_lock);
 
+	return &first->async_tx;
 }
 
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	struct sh_desc *desc, *_desc;
+	/* Is the "exposed" head of a chain acked? */
+	bool head_acked = false;
+	dma_cookie_t cookie = 0;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		/* non send data */
-		if (desc->mark == DESC_NCOMP)
+		struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+		BUG_ON(desc->mark != DESC_SUBMITTED &&
+		       desc->mark != DESC_COMPLETED &&
+		       desc->mark != DESC_WAITING);
+
+		/*
+		 * queue is ordered, and we use this loop to (1) clean up all
+		 * completed descriptors, and to (2) update descriptor flags of
+		 * any chunks in a (partially) completed chain
+		 */
+		if (!all && desc->mark == DESC_SUBMITTED &&
+		    desc->cookie != cookie)
 			break;
 
-		/* send data sesc */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
+		if (tx->cookie > 0)
+			cookie = tx->cookie;
 
-		/* Remove from ld_queue list */
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			sh_chan->completed_cookie = desc->cookie;
+		}
 
-		dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
-				desc);
+		/* Call callback on the last chunk */
+		if (desc->mark == DESC_COMPLETED && tx->callback) {
+			desc->mark = DESC_WAITING;
+			callback = tx->callback;
+			param = tx->callback_param;
+			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
+				tx->cookie, tx, sh_chan->id);
+			BUG_ON(desc->chunks != 1);
+			break;
+		}
 
-		list_move(&desc->node, &sh_chan->ld_free);
-		/* Run the link descriptor callback function */
-		if (callback) {
-			spin_unlock_bh(&sh_chan->desc_lock);
-			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
-					desc);
-			callback(callback_param);
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+			if (desc->mark == DESC_COMPLETED) {
+				BUG_ON(tx->cookie < 0);
+				desc->mark = DESC_WAITING;
+			}
+			head_acked = async_tx_test_ack(tx);
+		} else {
+			switch (desc->mark) {
+			case DESC_COMPLETED:
+				desc->mark = DESC_WAITING;
+				/* Fall through */
+			case DESC_WAITING:
+				if (head_acked)
+					async_tx_ack(&desc->async_tx);
+			}
+		}
+
+		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+			tx, tx->cookie);
+
+		if (((desc->mark == DESC_COMPLETED ||
+		      desc->mark == DESC_WAITING) &&
+		     async_tx_test_ack(&desc->async_tx)) || all) {
+			/* Remove from ld_queue list */
+			desc->mark = DESC_IDLE;
+			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
+
+	if (callback)
+		callback(param);
+
+	return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+	while (__ld_cleanup(sh_chan, all))
+		;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct list_head *ld_node;
-	struct sh_dmae_regs hw;
+	struct sh_desc *sd;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan))
+	if (dmae_is_busy(sh_chan)) {
+		spin_unlock_bh(&sh_chan->desc_lock);
 		return;
+	}
 
 	/* Find the first un-transfer desciptor */
-	for (ld_node = sh_chan->ld_queue.next;
-		(ld_node != &sh_chan->ld_queue)
-			&& (to_sh_desc(ld_node)->mark == DESC_COMP);
-		ld_node = ld_node->next)
-		cpu_relax();
-
-	if (ld_node != &sh_chan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		hw = to_sh_desc(ld_node)->hw;
-		dmae_set_reg(sh_chan, hw);
-		dmae_start(sh_chan);
-	}
+	list_for_each_entry(sd, &sh_chan->ld_queue, node)
+		if (sd->mark == DESC_SUBMITTED) {
+			/* Get the ld start address from ld_queue */
+			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_start(sh_chan);
+			break;
+		}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -420,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
 	last_used = chan->cookie;
 	last_complete = sh_chan->completed_cookie;
-	if (last_complete == -EBUSY)
-		last_complete = last_used;
+	BUG_ON(last_complete < 0);
 
 	if (done)
 		*done = last_complete;
@@ -480,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 		err = sh_dmae_rst(0);
 		if (err)
 			return err;
+#ifdef SH_DMAC_BASE1
 		if (shdev->pdata.mode & SHDMA_DMAOR1) {
 			err = sh_dmae_rst(1);
 			if (err)
 				return err;
 		}
+#endif
 		disable_irq(irq);
 		return IRQ_HANDLED;
 	}
@@ -494,35 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 static void dmae_do_tasklet(unsigned long data)
 {
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	struct sh_desc *desc, *_desc, *cur_desc = NULL;
+	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 
-	list_for_each_entry_safe(desc, _desc,
-				 &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
-			cur_desc = desc;
+	spin_lock(&sh_chan->desc_lock);
+	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+		    desc->mark == DESC_SUBMITTED) {
+			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+				desc->async_tx.cookie, &desc->async_tx,
+				desc->hw.dar);
+			desc->mark = DESC_COMPLETED;
 			break;
 		}
 	}
+	spin_unlock(&sh_chan->desc_lock);
 
-	if (cur_desc) {
-		switch (cur_desc->async_tx.cookie) {
-		case 0: /* other desc data */
-			break;
-		case -EBUSY: /* last desc */
-		sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie;
-			break;
-		default: /* first desc ( 0 < )*/
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie - 1;
-			break;
-		}
-		cur_desc->mark = DESC_COMP;
-	}
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
 static unsigned int get_dmae_irq(unsigned int id)
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e5..108f1cf 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H
 
-#include <linux/device.h>
-#include <linux/dmapool.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
 
 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
 
@@ -26,13 +26,16 @@ struct sh_dmae_regs {
 };
 
 struct sh_desc {
-	struct list_head tx_list;
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	dma_cookie_t cookie;
+	int chunks;
 	int mark;
 };
 
+struct device;
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-- 
1.6.2.4



* [PATCH 3/3] dmaengine: clarify the meaning of the DMA_CTRL_ACK flag
  2009-12-10 17:35 [PATCH 0/3 v2] sh: fix dma driver Guennadi Liakhovetski
  2009-12-10 17:35 ` [PATCH 1/3 v2] sh: stylistic improvements for the DMA driver Guennadi Liakhovetski
  2009-12-10 17:35 ` [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and cookie assignment Guennadi Liakhovetski
@ 2009-12-10 17:35 ` Guennadi Liakhovetski
  2009-12-12  4:58 ` [PATCH 0/3 v2] sh: fix dma driver Dan Williams
  3 siblings, 0 replies; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-10 17:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: Dan Williams, linux-sh

DMA_CTRL_ACK's description applies to its clear state, not to its set state.
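
In other words: while DMA_CTRL_ACK is clear the client may still attach
dependency chains to the descriptor, so a driver must not recycle it after
completion until the client acks it (async_tx_ack()). A minimal sketch of
the resulting driver-side rule, reusing the shdma structures from patch 2/3
(the helper name is made up for illustration):

/* assumes <linux/dmaengine.h>, <linux/list.h> and "shdma.h" from this series */
static void recycle_if_acked(struct sh_dmae_chan *sh_chan,
			     struct sh_desc *desc)
{
	/* DMA_CTRL_ACK still clear: the client may yet chain on this tx */
	if (!async_tx_test_ack(&desc->async_tx))
		return;

	/* acked: safe to mark idle and return to the free list */
	desc->mark = DESC_IDLE;
	list_move(&desc->node, &sh_chan->ld_free);
}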

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 include/linux/dmaengine.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac..7878498 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
  *  control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
-- 
1.6.2.4



* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and cookie assignment
  2009-12-10 17:35 ` [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and cookie assignment Guennadi Liakhovetski
@ 2009-12-11  8:02   ` Paul Mundt
  2009-12-12  2:15     ` Nobuhiro Iwamatsu
  0 siblings, 1 reply; 12+ messages in thread
From: Paul Mundt @ 2009-12-11  8:02 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-kernel, Dan Williams, linux-sh

On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
> and its chaining of partial descriptors is broken. The latter problem is
> usually invisible, because the maximum transfer size per chunk is 16M, but
> if you artificially set this limit lower, the driver fails. Since cookies
> are also used in chunk management, both problems are fixed in one patch. As
> side effects, a possible memory leak (descriptors prepared but never
> submitted) and multiple races have also been fixed.
> 
> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

Looks good to me.

Acked-by: Paul Mundt <lethal@linux-sh.org>


* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-11  8:02   ` Paul Mundt
@ 2009-12-12  2:15     ` Nobuhiro Iwamatsu
  2009-12-17  3:09       ` Nobuhiro Iwamatsu
  0 siblings, 1 reply; 12+ messages in thread
From: Nobuhiro Iwamatsu @ 2009-12-12  2:15 UTC (permalink / raw)
  To: Paul Mundt; +Cc: Guennadi Liakhovetski, linux-kernel, Dan Williams, linux-sh

Hi,

Oh, I was going to check an old patch.

2009/12/11 Paul Mundt <lethal@linux-sh.org>:
> On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
>> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
>> and its chaining of partial descriptors is broken. The latter problem is
>> usually invisible, because the maximum transfer size per chunk is 16M, but
>> if you artificially set this limit lower, the driver fails. Since cookies
>> are also used in chunk management, both problems are fixed in one patch. As
>> side effects, a possible memory leak (descriptors prepared but never
>> submitted) and multiple races have also been fixed.
>>
>> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
>
> Looks good to me.
Me too.

But I want to check this on other SH CPUs; I will do that next week.
Please wait for my "Acked-by".

>
> Acked-by: Paul Mundt <lethal@linux-sh.org>

Best regards,
  Nobuhiro

> --
> To unsubscribe from this list: send the line "unsubscribe linux-sh" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>



-- 
Nobuhiro Iwamatsu
   iwamatsu at {nigauri.org / debian.org}
   GPG ID: 40AD1FA6


* Re: [PATCH 0/3 v2] sh: fix dma driver
  2009-12-10 17:35 [PATCH 0/3 v2] sh: fix dma driver Guennadi Liakhovetski
                   ` (2 preceding siblings ...)
  2009-12-10 17:35 ` [PATCH 3/3] dmaengine: clarify the meaning of the DMA_CTRL_ACK flag Guennadi Liakhovetski
@ 2009-12-12  4:58 ` Dan Williams
  3 siblings, 0 replies; 12+ messages in thread
From: Dan Williams @ 2009-12-12  4:58 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-kernel, linux-sh, Nobuhiro Iwamatsu

On Thu, Dec 10, 2009 at 10:35 AM, Guennadi Liakhovetski
<g.liakhovetski@gmx.de> wrote:
> Further testing with preempt enabled revealed more problems with
> insufficient locking and improper descriptor state management. Therefore
> I'm resubmitting the
>
> sh: fix DMA driver's descriptor chaining and cookie assignment
>
> patch. At the same time I swapped its order with the stylistic changes,
> so that the complex stuff comes at the end. I also added a minor patch
> fixing a comment in dmaengine.h. The patches
>
> sh: DMA driver has to specify its alignment requirements
> dmaengine: fix dmatest to verify minimum transfer length and test buffer size
> sh: dmaengine support for sh7724
>
> are unchanged, so they are not resubmitted.

Applied:
      sh: DMA driver has to specify its alignment requirements
      dmaengine: fix dmatest to verify minimum transfer length and test buffer size
      sh: stylistic improvements for the DMA driver
      dmaengine: clarify the meaning of the DMA_CTRL_ACK flag

Waiting for Nobuhiro's ack on:
      sh: fix DMA driver's descriptor chaining and cookie assignment
      sh: dmaengine support for sh7724

Thanks,
Dan


* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-12  2:15     ` Nobuhiro Iwamatsu
@ 2009-12-17  3:09       ` Nobuhiro Iwamatsu
  2009-12-17  7:40         ` Guennadi Liakhovetski
  0 siblings, 1 reply; 12+ messages in thread
From: Nobuhiro Iwamatsu @ 2009-12-17  3:09 UTC (permalink / raw)
  To: Paul Mundt; +Cc: Guennadi Liakhovetski, linux-kernel, Dan Williams, linux-sh

[-- Attachment #1: Type: text/plain, Size: 1246 bytes --]

Hi,

2009/12/12 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>:
> Hi,
>
> Oh, I was going to check an old patch.
>
> 2009/12/11 Paul Mundt <lethal@linux-sh.org>:
>> On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
>>> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
>>> and its chaining of partial descriptors is broken. The latter problem is
>>> usually invisible, because the maximum transfer size per chunk is 16M, but
>>> if you artificially set this limit lower, the driver fails. Since cookies
>>> are also used in chunk management, both problems are fixed in one patch. As
>>> side effects, a possible memory leak (descriptors prepared but never
>>> submitted) and multiple races have also been fixed.
>>>
>>> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
>>
>> Looks good to me.
> Me too.
>
> But I want to check this on other SH CPUs; I will do that next week.
> Please wait for my "Acked-by".
>
>

I tested this patch, but I could not apply it to Paul's git HEAD.
I re-created the patch and attached it.
This version of the patch works fine.
Guennadi, could you test this patch on sh7724?

Best regards,
  Nobuhiro

-- 
Nobuhiro Iwamatsu
   iwamatsu at {nigauri.org / debian.org}
   GPG ID: 40AD1FA6

[-- Attachment #2: sh-fix-DMA-driver-s-descriptor-chaining-and-cookie-a.patch --]
[-- Type: text/plain, Size: 16708 bytes --]

From 5a17934f5c2731b35e71418e94bf5e557a2c1f19 Mon Sep 17 00:00:00 2001
From: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
Date: Thu, 17 Dec 2009 10:46:00 +0900
Subject: [PATCH v3] sh: fix DMA driver's descriptor chaining and cookie assignment

The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
and its chaining of partial descriptors is broken. The latter problem is
usually invisible, because the maximum transfer size per chunk is 16M, but
if you artificially set this limit lower, the driver fails. Since cookies
are also used in chunk management, both problems are fixed in one patch. As
side effects, a possible memory leak (descriptors prepared but never
submitted) and multiple races have also been fixed.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
---
 drivers/dma/shdma.c |  327 +++++++++++++++++++++++++++++++++-----------------
 drivers/dma/shdma.h |    9 +-
 2 files changed, 222 insertions(+), 114 deletions(-)

diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2e4a54c..7750e65 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/platform_device.h>
 #include <cpu/dma.h>
 #include <asm/dma-sh.h>
 #include "shdma.h"
 
 /* DMA descriptor control */
-#define DESC_LAST	(-1)
-#define DESC_COMP	(1)
-#define DESC_NCOMP	(0)
+enum sh_dmae_desc_status {
+	DESC_IDLE,
+	DESC_PREPARED,
+	DESC_SUBMITTED,
+	DESC_COMPLETED,	/* completed, have to call callback */
+	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
+};
 
 #define NR_DESCS_PER_CHANNEL 32
 /*
@@ -45,6 +48,8 @@
  */
 #define RS_DEFAULT  (RS_DUAL)
 
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -106,11 +111,11 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
 	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
 }
 
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
-	sh_dmae_writel(sh_chan, hw.sar, SAR);
-	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->sar, SAR);
+	sh_dmae_writel(sh_chan, hw->dar, DAR);
+	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -184,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct sh_desc *desc = tx_to_sh_desc(tx);
+	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -195,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (cookie < 0)
 		cookie = 1;
 
-	/* If desc only in the case of 1 */
-	if (desc->async_tx.cookie != -EBUSY)
-		desc->async_tx.cookie = cookie;
-	sh_chan->common.cookie = desc->async_tx.cookie;
+	sh_chan->common.cookie = cookie;
+	tx->cookie = cookie;
+
+	/* Mark all chunks of this descriptor as submitted, move to the queue */
+	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+		/*
+		 * All chunks are on the global ld_free, so, we have to find
+		 * the end of the chain ourselves
+		 */
+		if (chunk != desc && (chunk->mark == DESC_IDLE ||
+				chunk->async_tx.cookie > 0 ||
+				chunk->async_tx.cookie == -EBUSY ||
+				&chunk->node == &sh_chan->ld_free))
+			break;
+		chunk->mark = DESC_SUBMITTED;
+		/* Callback goes to the last chunk */
+		chunk->async_tx.callback = NULL;
+		chunk->cookie = cookie;
+		list_move_tail(&chunk->node, &sh_chan->ld_queue);
+		last = chunk;
+	}
+
+	last->async_tx.callback = callback;
+	last->async_tx.callback_param = tx->callback_param;
 
-	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+		tx->cookie, &last->async_tx, sh_chan->id,
+		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return cookie;
 }
 
+/* Called with desc_lock held */
 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 {
-	struct sh_desc *desc, *_desc, *ret = NULL;
+	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
-		if (async_tx_test_ack(&desc->async_tx)) {
+	list_for_each_entry(desc, &sh_chan->ld_free, node)
+		if (desc->mark != DESC_PREPARED) {
+			BUG_ON(desc->mark != DESC_IDLE);
 			list_del(&desc->node);
-			ret = desc;
-			break;
+			return desc;
 		}
-	}
-	spin_unlock_bh(&sh_chan->desc_lock);
 
-	return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
-	if (desc) {
-		spin_lock_bh(&sh_chan->desc_lock);
-
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
-		list_add(&desc->node, &sh_chan->ld_free);
-
-		spin_unlock_bh(&sh_chan->desc_lock);
-	}
+	return NULL;
 }
 
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -252,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
-		desc->async_tx.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc->tx_list);
-		sh_dmae_put_desc(sh_chan, desc);
+		desc->mark = DESC_IDLE;
 
 		spin_lock_bh(&sh_chan->desc_lock);
+		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
@@ -273,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	BUG_ON(!list_empty(&sh_chan->ld_queue));
+	/* Prepared and not submitted descriptors can still be on the queue */
+	if (!list_empty(&sh_chan->ld_queue))
+		sh_dmae_chan_ld_cleanup(sh_chan, true);
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -292,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	struct sh_dmae_chan *sh_chan;
 	struct sh_desc *first = NULL, *prev = NULL, *new;
 	size_t copy_size;
+	LIST_HEAD(tx_list);
+	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
 	if (!chan)
 		return NULL;
@@ -301,42 +319,75 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 
 	sh_chan = to_sh_chan(chan);
 
+	/* Have to lock the whole loop to protect against concurrent release */
+	spin_lock_bh(&sh_chan->desc_lock);
+
+	/*
+	 * Chaining:
+	 * first descriptor is what user is dealing with in all API calls, its
+	 * cookie is at first set to -EBUSY, at tx-submit to a positive
+	 * number
+	 * if more than one chunk is needed further chunks have cookie = -EINVAL
+	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
+	 * all chunks are linked onto the tx_list head with their .node heads
+	 * only during this function, then they are immediately spliced
+	 * back onto the free list in form of a chain
+	 */
+
 	do {
-		/* Allocate the link descriptor from DMA pool */
+		/* Allocate the link descriptor from the free list */
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
 				"No free memory for link descriptor\n");
-			goto err_get_desc;
+			list_for_each_entry(new, &tx_list, node)
+			new->mark = DESC_IDLE;
+			list_splice(&tx_list, &sh_chan->ld_free);
+			spin_unlock_bh(&sh_chan->desc_lock);
+			return NULL;
 		}
 
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
 
 		new->hw.sar = dma_src;
 		new->hw.dar = dma_dest;
 		new->hw.tcr = copy_size;
-		if (!first)
+
+		if (!first) {
+			/* First desc */
+			new->async_tx.cookie = -EBUSY;
 			first = new;
+		} else {
+			/* Other desc - invisible to the user */
+			new->async_tx.cookie = -EINVAL;
+		}
+
+		dev_dbg(sh_chan->dev,
+			"chaining %u of %u with %p, dst %x, cookie %d\n",
+			copy_size, len, &new->async_tx, dma_dest,
+			new->async_tx.cookie);
 
-		new->mark = DESC_NCOMP;
-		async_tx_ack(&new->async_tx);
+		new->mark = DESC_PREPARED;
+		new->async_tx.flags = flags;
+		new->chunks = chunks--;
 
 		prev = new;
 		len -= copy_size;
 		dma_src += copy_size;
 		dma_dest += copy_size;
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
+		list_add_tail(&new->node, &tx_list);
 	} while (len);
 
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY; /* Last desc */
+	if (new != first)
+		new->async_tx.cookie = -ENOSPC;
 
-	return &first->async_tx;
+	/* Put them back on the free list, so, they don't get lost */
+	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-err_get_desc:
-	sh_dmae_put_desc(sh_chan, first);
-	return NULL;
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return &first->async_tx;
 
 }
 
@@ -345,64 +396,126 @@ err_get_desc:
  *
  * This function clean up the ld_queue of DMA channel.
  */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback
+__ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	struct sh_desc *desc, *_desc;
+	/* Is the "exposed" head of a chain acked? */
+	bool head_acked = false;
+	dma_cookie_t cookie = 0;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
+
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		/* non send data */
-		if (desc->mark == DESC_NCOMP)
+		struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+		BUG_ON(desc->mark != DESC_SUBMITTED &&
+			desc->mark != DESC_COMPLETED &&
+			desc->mark != DESC_WAITING);
+
+		/*
+		 * queue is ordered, and we use this loop to (1) clean up all
+		 * completed descriptors, and to (2) update descriptor flags of
+		 * any chunks in a (partially) completed chain
+		 */
+		if (!all && desc->mark == DESC_SUBMITTED &&
+			desc->cookie != cookie)
 			break;
 
-		/* send data sesc */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
+		if (tx->cookie > 0)
+			cookie = tx->cookie;
 
 		/* Remove from ld_queue list */
-		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			sh_chan->completed_cookie = desc->cookie;
+		}
 
-		dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
-				desc);
+		/* Call callback on the last chunk */
+		if (desc->mark == DESC_COMPLETED && tx->callback) {
+			desc->mark = DESC_WAITING;
+			callback = tx->callback;
+			param = tx->callback_param;
+			dev_dbg(sh_chan->dev,
+				"descriptor #%d@%p on %d callback\n",
+				tx->cookie, tx, sh_chan->id);
+			BUG_ON(desc->chunks != 1);
+			break;
+		}
 
-		list_move(&desc->node, &sh_chan->ld_free);
-		/* Run the link descriptor callback function */
-		if (callback) {
-			spin_unlock_bh(&sh_chan->desc_lock);
-			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
-					desc);
-			callback(callback_param);
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+			if (desc->mark == DESC_COMPLETED) {
+				BUG_ON(tx->cookie < 0);
+				desc->mark = DESC_WAITING;
+			}
+			head_acked = async_tx_test_ack(tx);
+		} else {
+			switch (desc->mark) {
+			case DESC_COMPLETED:
+				desc->mark = DESC_WAITING;
+				/* Fall through */
+			case DESC_WAITING:
+				if (head_acked)
+					async_tx_ack(&desc->async_tx);
+			}
+		}
+
+		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+			tx, tx->cookie);
+
+		if (((desc->mark == DESC_COMPLETED ||
+			desc->mark == DESC_WAITING) &&
+			async_tx_test_ack(&desc->async_tx)) || all) {
+			/* Remove from ld_queue list */
+			desc->mark = DESC_IDLE;
+			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
 	spin_unlock_bh(&sh_chan->desc_lock);
+
+	if (callback)
+		callback(param);
+
+	return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+	while (__ld_cleanup(sh_chan, all))
+		;
 }
 
+
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
-	struct list_head *ld_node;
-	struct sh_dmae_regs hw;
+	struct sh_desc *sd;
+
+	spin_lock_bh(&sh_chan->desc_lock);
 
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan))
+	if (dmae_is_busy(sh_chan)) {
+		spin_unlock_bh(&sh_chan->desc_lock);
 		return;
+	}
 
 	/* Find the first un-transfer desciptor */
-	for (ld_node = sh_chan->ld_queue.next;
-		(ld_node != &sh_chan->ld_queue)
-			&& (to_sh_desc(ld_node)->mark == DESC_COMP);
-		ld_node = ld_node->next)
-		cpu_relax();
-
-	if (ld_node != &sh_chan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		hw = to_sh_desc(ld_node)->hw;
-		dmae_set_reg(sh_chan, hw);
-		dmae_start(sh_chan);
-	}
+	list_for_each_entry(sd, &sh_chan->ld_queue, node)
+		if (sd->mark == DESC_SUBMITTED) {
+			/* Get the ld start address from ld_queue */
+			dmae_set_reg(sh_chan, &sd->hw);
+			dmae_start(sh_chan);
+			break;
+		}
+
+	spin_unlock_bh(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -420,12 +533,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
 	last_used = chan->cookie;
 	last_complete = sh_chan->completed_cookie;
-	if (last_complete == -EBUSY)
-		last_complete = last_used;
+	BUG_ON(last_complete < 0);
 
 	if (done)
 		*done = last_complete;
@@ -480,11 +592,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 		err = sh_dmae_rst(0);
 		if (err)
 			return err;
+#ifdef SH_DMAC_BASE1
 		if (shdev->pdata.mode & SHDMA_DMAOR1) {
 			err = sh_dmae_rst(1);
 			if (err)
 				return err;
 		}
+#endif
 		disable_irq(irq);
 		return IRQ_HANDLED;
 	}
@@ -494,35 +608,26 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 static void dmae_do_tasklet(unsigned long data)
 {
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	struct sh_desc *desc, *_desc, *cur_desc = NULL;
+	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 
-	list_for_each_entry_safe(desc, _desc,
-				 &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
-			cur_desc = desc;
+	spin_lock(&sh_chan->desc_lock);
+	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+			desc->mark == DESC_SUBMITTED) {
+			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+				desc->async_tx.cookie, &desc->async_tx,
+				desc->hw.dar);
+			desc->mark = DESC_COMPLETED;
 			break;
 		}
 	}
 
-	if (cur_desc) {
-		switch (cur_desc->async_tx.cookie) {
-		case 0: /* other desc data */
-			break;
-		case -EBUSY: /* last desc */
-		sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie;
-			break;
-		default: /* first desc ( 0 < )*/
-			sh_chan->completed_cookie =
-				cur_desc->async_tx.cookie - 1;
-			break;
-		}
-		cur_desc->mark = DESC_COMP;
-	}
+	spin_unlock(&sh_chan->desc_lock);
+
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
-	sh_dmae_chan_ld_cleanup(sh_chan);
+	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
 static unsigned int get_dmae_irq(unsigned int id)
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 60b81e5..108f1cf 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H
 
-#include <linux/device.h>
-#include <linux/dmapool.h>
 #include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
 
 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
 
@@ -26,13 +26,16 @@ struct sh_dmae_regs {
 };
 
 struct sh_desc {
-	struct list_head tx_list;
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
+	dma_cookie_t cookie;
+	int chunks;
 	int mark;
 };
 
+struct device;
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-- 
1.6.5.4



* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-17  3:09       ` Nobuhiro Iwamatsu
@ 2009-12-17  7:40         ` Guennadi Liakhovetski
  2009-12-17  8:33           ` Nobuhiro Iwamatsu
  0 siblings, 1 reply; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-17  7:40 UTC (permalink / raw)
  To: Nobuhiro Iwamatsu; +Cc: Paul Mundt, linux-kernel, Dan Williams, linux-sh

Hello Iwamatsu-san

On Thu, 17 Dec 2009, Nobuhiro Iwamatsu wrote:

> Hi,
> 
> 2009/12/12 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>:
> > Hi,
> >
> > Oh, I was going to check an old patch.
> >
> > 2009/12/11 Paul Mundt <lethal@linux-sh.org>:
> >> On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
> >>> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
> >>> and its chaining of partial descriptors is broken. The latter problem is
> >>> usually invisible, because the maximum transfer size per chunk is 16M, but
> >>> if you artificially set this limit lower, the driver fails. Since cookies
> >>> are also used in chunk management, both problems are fixed in one patch. As
> >>> side effects, a possible memory leak (descriptors prepared but never
> >>> submitted) and multiple races have also been fixed.
> >>>
> >>> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> >>
> >> Looks good to me.
> > Me too.
> >
> > But I want to check this on other SH CPUs; I will do that next week.
> > Please wait for my "Acked-by".
> >
> >
> 
> I tested this patch, but I could not apply it to Paul's git HEAD.
> I re-created the patch and attached it.
> This version of the patch works fine.
> Guennadi, could you test this patch on sh7724?

Thanks for testing. I looked at your version of my patch, and I don't
agree with it. You needlessly changed the indentation in it and also
forgot to include a "From: " line to indicate its original authorship.
Besides, I think Dan will be applying this patch on top of current -next
or Linus' tree. Of course, it may be that my version of the patch no
longer applies to those trees either. So, Dan, if you cannot merge my
version of this patch yourself, please let me know and I'll rebase it;
but please do not use this version.
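
(For reference, the usual way to keep authorship intact when resending
someone else's patch is a "From:" line at the very top of the mail body,
which "git am" then records as the commit author, with the resender adding
a second Signed-off-by underneath the original one, e.g.:

From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

[original changelog text]

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>

The addresses are just the ones already used in this thread.)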

Thanks
Guennadi
---
Guennadi Liakhovetski


* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-17  7:40         ` Guennadi Liakhovetski
@ 2009-12-17  8:33           ` Nobuhiro Iwamatsu
  2009-12-17  8:43             ` Guennadi Liakhovetski
  0 siblings, 1 reply; 12+ messages in thread
From: Nobuhiro Iwamatsu @ 2009-12-17  8:33 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: Paul Mundt, linux-kernel, Dan Williams, linux-sh

Hi,

2009/12/17 Guennadi Liakhovetski <g.liakhovetski@gmx.de>:
> Hello Iwamatsu-san
>
> On Thu, 17 Dec 2009, Nobuhiro Iwamatsu wrote:
>
>> Hi,
>>
>> 2009/12/12 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>:
>> > Hi,
>> >
>> > Oh, I was going to check an old patch.
>> >
>> > 2009/12/11 Paul Mundt <lethal@linux-sh.org>:
>> >> On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
>> >>> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
>> >>> and its chaining of partial descriptors is broken. The latter problem is
>> >>> usually invisible, because the maximum transfer size per chunk is 16M, but
>> >>> if you artificially set this limit lower, the driver fails. Since cookies
>> >>> are also used in chunk management, both problems are fixed in one patch. As
>> >>> side effects, a possible memory leak (descriptors prepared but never
>> >>> submitted) and multiple races have also been fixed.
>> >>>
>> >>> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
>> >>
>> >> Looks good to me.
>> > Me too.
>> >
>> > But I want to check this on other SH CPUs; I will do that next week.
>> > Please wait for my "Acked-by".
>> >
>> >
>>
>> I tested this patch, but I could not apply it to Paul's git HEAD.
>> I re-created the patch and attached it.
>> This version of the patch works fine.
>> Guennadi, could you test this patch on sh7724?
>
> Thanks for testing. I looked at your version of my patch, and I don't
> agree with it. You needlessly changed the indentation in it and also
> forgot to include a "From: " line to indicate its original authorship.
> Besides, I think Dan will be applying this patch on top of current -next
> or Linus' tree. Of course, it may be that my version of the patch no
> longer applies to those trees either. So, Dan, if you cannot merge my
> version of this patch yourself, please let me know and I'll rebase it;
> but please do not use this version.

Yes, you are right.
And sorry, I did not mean to take over your patch.
I simply sent the patch that I had revised on my side.

Best regards,
  Nobuhiro

-- 
Nobuhiro Iwamatsu
   iwamatsu at {nigauri.org / debian.org}
   GPG ID: 40AD1FA6


* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-17  8:33           ` Nobuhiro Iwamatsu
@ 2009-12-17  8:43             ` Guennadi Liakhovetski
  2009-12-17 17:50               ` Dan Williams
  0 siblings, 1 reply; 12+ messages in thread
From: Guennadi Liakhovetski @ 2009-12-17  8:43 UTC (permalink / raw)
  To: Nobuhiro Iwamatsu; +Cc: Paul Mundt, linux-kernel, Dan Williams, linux-sh

On Thu, 17 Dec 2009, Nobuhiro Iwamatsu wrote:

> HI,
> 
> 2009/12/17 Guennadi Liakhovetski <g.liakhovetski@gmx.de>:
> > Hello Iwamatsu-san
> >
> > On Thu, 17 Dec 2009, Nobuhiro Iwamatsu wrote:
> >
> >> Hi,
> >>
> >> 2009/12/12 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>:
> >> > Hi,
> >> >
> >> > Oh, I was going to check an old patch.
> >> >
> >> > 2009/12/11 Paul Mundt <lethal@linux-sh.org>:
> >> >> On Thu, Dec 10, 2009 at 06:35:11PM +0100, Guennadi Liakhovetski wrote:
> >> >>> The SH DMA driver wrongly assigns negative cookies to transfer descriptors,
> >> >>> and its chaining of partial descriptors is broken. The latter problem is
> >> >>> usually invisible, because the maximum transfer size per chunk is 16M, but
> >> >>> if you artificially set this limit lower, the driver fails. Since cookies
> >> >>> are also used in chunk management, both problems are fixed in one patch. As
> >> >>> side effects, a possible memory leak (descriptors prepared but never
> >> >>> submitted) and multiple races have also been fixed.
> >> >>>
> >> >>> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> >> >>
> >> >> Looks good to me.
> >> > Me too.
> >> >
> >> > But I want to check this on other SH CPUs; I will do that next week.
> >> > Please wait for my "Acked-by".
> >> >
> >> >
> >>
> >> I tested this patch, but I could not apply it to Paul's git HEAD.
> >> I re-created the patch and attached it.
> >> This version of the patch works fine.
> >> Guennadi, could you test this patch on sh7724?
> >
> > Thanks for testing. I looked at your version of my patch, and I don't
> > agree with it. You needlessly changed the indentation in it and also
> > forgot to include a "From: " line to indicate its original authorship.
> > Besides, I think Dan will be applying this patch on top of current -next
> > or Linus' tree. Of course, it may be that my version of the patch no
> > longer applies to those trees either. So, Dan, if you cannot merge my
> > version of this patch yourself, please let me know and I'll rebase it;
> > but please do not use this version.
> 
> Yes, you are right.
> And sorry, I did not mean to take over your patch.

Sure, I never assumed the opposite; that's why I said "you forgot". The
same thing happened to me recently too; fortunately, I noticed it early
enough to push a corrected version. The actual problem with your version
was the white-space changes.

> I simply sent the patch that I had revised on my side.

Thanks
Guennadi
---
Guennadi Liakhovetski


* Re: [PATCH 2/3 v2] sh: fix DMA driver's descriptor chaining and  cookie assignment
  2009-12-17  8:43             ` Guennadi Liakhovetski
@ 2009-12-17 17:50               ` Dan Williams
  0 siblings, 0 replies; 12+ messages in thread
From: Dan Williams @ 2009-12-17 17:50 UTC (permalink / raw)
  To: Guennadi Liakhovetski
  Cc: Nobuhiro Iwamatsu, Paul Mundt, linux-kernel, linux-sh

On Thu, Dec 17, 2009 at 1:43 AM, Guennadi Liakhovetski
<g.liakhovetski@gmx.de> wrote:
> Sure, I never assumed the opposite; that's why I said "you forgot". The
> same thing happened to me recently too; fortunately, I noticed it early
> enough to push a corrected version. The actual problem with your version
> was the white-space changes.

I kept your original v2 patch.  You can double check the state of my
'fixes' branch [1].

Thanks,
Dan

[1]: http://git.kernel.org/?p=linux/kernel/git/djbw/async_tx.git;a=shortlog;h=refs/heads/fixes

