linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
* fsldma: cleanup driver and fix async_tx compatibility
@ 2010-01-01  6:10 Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 1/8] fsldma: reduce kernel text size Ira W. Snyder
                   ` (9 more replies)
  0 siblings, 10 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

This patch series cleans up the Freescale DMAEngine driver, including
verifying the locking and making sure that all code paths are correct.
There were a few places that seemed suspicious, and they have been fixed.

I have written a quick memory->memory DMAEngine test driver, and the
performance is identical before and after my changes (<0.1% change). I
measured both setting up the DMA operation (via device_prep_dma_interrupt()
and device_prep_dma_memcpy()) and the actual DMA transfer itself.

As an added bonus, the interrupt load is measurably reduced. My test driver
transfers 32MB as 32x 1MB chunks + 1 interrupt descriptor, using the
functions noted above. Previous to this patch series, 31 interrupts were
generated. After this patch series, only a single interrupt is generated
for the whole transaction.

Some testing on 85xx/86xx hardware would be appreciated. Also, some testing
by the users attempting to use async_tx and talitos to handle RAID offload
would be great as well.

 Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +-
 drivers/dma/fsldma.c                           | 1036 ++++++++++++------------
 drivers/dma/fsldma.h                           |   35 +-
 3 files changed, 556 insertions(+), 532 deletions(-)

Thanks,
Ira

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 1/8] fsldma: reduce kernel text size
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 2/8] fsldma: remove unused structure members Ira W. Snyder
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

Some of the functions use multiple register reads and writes where a
single read/write pair would suffice. This shrinks the kernel text size
measurably, while making the functions easier to understand.

add/remove: 0/0 grow/shrink: 1/4 up/down: 4/-196 (-192)
function                                     old     new   delta
fsl_chan_set_request_count                   120     124      +4
dma_halt                                     300     272     -28
fsl_chan_set_src_loop_size                   208     156     -52
fsl_chan_set_dest_loop_size                  208     156     -52
fsl_chan_xfer_ld_queue                       500     436     -64

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |   83 +++++++++++++++++++++++++++-----------------------
 1 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e7..0bad741 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -143,43 +143,45 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 
 static void dma_start(struct fsl_dma_chan *fsl_chan)
 {
-	u32 mr_set = 0;
-
-	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
-		mr_set |= FSL_DMA_MR_EMP_EN;
-	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-				& ~FSL_DMA_MR_EMP_EN, 32);
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
+	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+			DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
+			mode |= FSL_DMA_MR_EMP_EN;
+		} else {
+			mode &= ~FSL_DMA_MR_EMP_EN;
+		}
 	}
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
-		mr_set |= FSL_DMA_MR_EMS_EN;
+		mode |= FSL_DMA_MR_EMS_EN;
 	else
-		mr_set |= FSL_DMA_MR_CS;
+		mode |= FSL_DMA_MR_CS;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| mr_set, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 static void dma_halt(struct fsl_dma_chan *fsl_chan)
 {
+	u32 mode;
 	int i;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
-		32);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
-		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode |= FSL_DMA_MR_CA;
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+
+	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(fsl_chan))
 			break;
 		udelay(10);
 	}
+
 	if (i >= 100 && !dma_is_idle(fsl_chan))
 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
@@ -231,22 +233,23 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  */
 static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_SAHE), 32);
+		mode &= ~FSL_DMA_MR_SAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
-			32);
+		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
 		break;
 	}
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
@@ -262,22 +265,23 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  */
 static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_DAHE), 32);
+		mode &= ~FSL_DMA_MR_DAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
-			32);
+		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
 		break;
 	}
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
@@ -294,11 +298,14 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  */
 static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
 	BUG_ON(size > 1024);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| ((__ilog2(size) << 24) & 0x0f000000),
-		32);
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 2/8] fsldma: remove unused structure members
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 1/8] fsldma: reduce kernel text size Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 3/8] fsldma: rename struct fsl_dma_chan to struct fsldma_chan Ira W. Snyder
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

Remove some unused members from the fsldma data structures. A few trivial
uses of struct resource were converted to use the stack rather than keeping
the memory allocated for the lifetime of the driver.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |   17 ++++++++---------
 drivers/dma/fsldma.h |    4 ----
 2 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0bad741..0b4e638 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1072,6 +1072,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	struct device_node *node, u32 feature, const char *compatible)
 {
 	struct fsl_dma_chan *new_fsl_chan;
+	struct resource res;
 	int err;
 
 	/* alloc channel */
@@ -1083,7 +1084,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	}
 
 	/* get dma channel register base */
-	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
+	err = of_address_to_resource(node, 0, &res);
 	if (err) {
 		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
 				node->full_name);
@@ -1101,10 +1102,8 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
 	new_fsl_chan->dev = fdev->dev;
-	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
-			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
-
-	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
+	new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res));
+	new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7;
 	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
 		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
@@ -1183,6 +1182,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	int err;
 	struct fsl_dma_device *fdev;
 	struct device_node *child;
+	struct resource res;
 
 	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
 	if (!fdev) {
@@ -1193,7 +1193,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	INIT_LIST_HEAD(&fdev->common.channels);
 
 	/* get DMA controller register base */
-	err = of_address_to_resource(dev->node, 0, &fdev->reg);
+	err = of_address_to_resource(dev->node, 0, &res);
 	if (err) {
 		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
 				dev->node->full_name);
@@ -1202,9 +1202,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
 			"controller at 0x%llx...\n",
-			match->compatible, (unsigned long long)fdev->reg.start);
-	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
-						- fdev->reg.start + 1);
+			match->compatible, (unsigned long long)res.start);
+	fdev->reg_base = ioremap(res.start, resource_size(&res));
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 0df14cb..dbb5b5c 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -92,8 +92,6 @@ struct fsl_desc_sw {
 	struct list_head node;
 	struct list_head tx_list;
 	struct dma_async_tx_descriptor async_tx;
-	struct list_head *ld;
-	void *priv;
 } __attribute__((aligned(32)));
 
 struct fsl_dma_chan_regs {
@@ -111,7 +109,6 @@ struct fsl_dma_chan;
 
 struct fsl_dma_device {
 	void __iomem *reg_base;	/* DGSR register base */
-	struct resource reg;	/* Resource for register */
 	struct device *dev;
 	struct dma_device common;
 	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
@@ -138,7 +135,6 @@ struct fsl_dma_chan {
 	struct dma_chan common;		/* DMA common channel */
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 	struct device *dev;		/* Channel device */
-	struct resource reg;		/* Resource for register */
 	int irq;			/* Channel IRQ */
 	int id;				/* Raw id of this channel */
 	struct tasklet_struct tasklet;
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 3/8] fsldma: rename struct fsl_dma_chan to struct fsldma_chan
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 1/8] fsldma: reduce kernel text size Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 2/8] fsldma: remove unused structure members Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 4/8] fsldma: rename dest to dst for uniformity Ira W. Snyder
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

This is the beginning of a cleanup which will change all instances of
"fsl_dma" to "fsldma" to match the name of the driver itself.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |  128 ++++++++++++++++++++++++++-----------------------
 drivers/dma/fsldma.h |   26 +++++-----
 2 files changed, 81 insertions(+), 73 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b4e638..6795d96 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,7 +37,7 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"
 
-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *fsl_chan)
 {
 	/* Reset the channel */
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
@@ -64,23 +64,23 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 
 }
 
-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
 }
 
-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
 }
 
-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *fsl_chan,
 				struct fsl_dma_ld_hw *hw, u32 count)
 {
 	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
 }
 
-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *fsl_chan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
 	u64 snoop_bits;
@@ -90,7 +90,7 @@ static void set_desc_src(struct fsl_dma_chan *fsl_chan,
 	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
 }
 
-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
+static void set_desc_dest(struct fsldma_chan *fsl_chan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
 {
 	u64 snoop_bits;
@@ -100,7 +100,7 @@ static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
 	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
 }
 
-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *fsl_chan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
 	u64 snoop_bits;
@@ -110,38 +110,38 @@ static void set_desc_next(struct fsl_dma_chan *fsl_chan,
 	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
 }
 
-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
 }
 
-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *fsl_chan)
 {
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
 }
 
-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *fsl_chan)
 {
 	u32 sr = get_sr(fsl_chan);
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *fsl_chan)
 {
 	u32 mode;
 
@@ -164,7 +164,7 @@ static void dma_start(struct fsl_dma_chan *fsl_chan)
 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *fsl_chan)
 {
 	u32 mode;
 	int i;
@@ -186,7 +186,7 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
 
-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
 	u64 snoop_bits;
@@ -199,7 +199,7 @@ static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			| snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
+static void append_ld_queue(struct fsldma_chan *fsl_chan,
 		struct fsl_desc_sw *new_desc)
 {
 	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
@@ -231,7 +231,7 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
  * SA + 1 ... and so on.
  */
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -263,7 +263,7 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
  * TA + 1 ... and so on.
  */
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -296,7 +296,7 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  *
  * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -317,7 +317,7 @@ static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
  * The DMA Request Count feature should be used in addition to this feature
  * to set the number of bytes to transfer before pausing the channel.
  */
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
 {
 	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
@@ -335,7 +335,7 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
  * transfer immediately. The DMA channel will wait for the
  * control pin asserted.
  */
-static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
 {
 	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
@@ -345,7 +345,7 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
@@ -379,7 +379,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
  * Return - The descriptor allocated. NULL for failed.
  */
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-					struct fsl_dma_chan *fsl_chan)
+					struct fsldma_chan *fsl_chan)
 {
 	dma_addr_t pdesc;
 	struct fsl_desc_sw *desc_sw;
@@ -408,7 +408,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  */
 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 
 	/* Has this channel already been allocated? */
 	if (fsl_chan->desc_pool)
@@ -435,7 +435,7 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
  */
 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
@@ -459,7 +459,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 static struct dma_async_tx_descriptor *
 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *new;
 
 	if (!chan)
@@ -489,7 +489,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
 	struct list_head *list;
 	size_t copy;
@@ -575,7 +575,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_data_direction direction, unsigned long flags)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
 	struct fsl_dma_slave *slave;
 	struct list_head *tx_list;
@@ -759,7 +759,7 @@ fail:
 
 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan;
+	struct fsldma_chan *fsl_chan;
 	struct fsl_desc_sw *desc, *tmp;
 	unsigned long flags;
 
@@ -786,7 +786,7 @@ static void fsl_dma_device_terminate_all(struct dma_chan *chan)
  * fsl_dma_update_completed_cookie - Update the completed cookie.
  * @fsl_chan : Freescale DMA channel
  */
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
 {
 	struct fsl_desc_sw *cur_desc, *desc;
 	dma_addr_t ld_phy;
@@ -820,7 +820,7 @@ static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
  * If 'in_intr' is set, the function will move the link descriptor to
  * the recycle list. Otherwise, free it directly.
  */
-static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
 {
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
@@ -864,7 +864,7 @@ static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
  * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
  * @fsl_chan : Freescale DMA channel
  */
-static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
 {
 	struct list_head *ld_node;
 	dma_addr_t next_dest_addr;
@@ -912,7 +912,7 @@ out_unlock:
  */
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 
 #ifdef FSL_DMA_LD_DEBUG
 	struct fsl_desc_sw *ld;
@@ -949,7 +949,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 					dma_cookie_t *done,
 					dma_cookie_t *used)
 {
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
@@ -969,7 +969,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 
 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
-	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+	struct fsldma_chan *fsl_chan = data;
 	u32 stat;
 	int update_cookie = 0;
 	int xfer_ld_q = 0;
@@ -1050,9 +1050,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 
 static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
 {
-	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
-	u32 gsr;
+	struct fsldma_device *fdev = data;
 	int ch_nr;
+	u32 gsr;
 
 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
 			: in_le32(fdev->reg_base);
@@ -1064,19 +1064,23 @@ static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
 
 static void dma_do_tasklet(unsigned long data)
 {
-	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+/*----------------------------------------------------------------------------*/
+/* OpenFirmware Subsystem                                                     */
+/*----------------------------------------------------------------------------*/
+
+static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsl_dma_chan *new_fsl_chan;
+	struct fsldma_chan *new_fsl_chan;
 	struct resource res;
 	int err;
 
 	/* alloc channel */
-	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
+	new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
 	if (!new_fsl_chan) {
 		dev_err(fdev->dev, "No free memory for allocating "
 				"dma channels!\n");
@@ -1167,7 +1171,7 @@ err_no_reg:
 	return err;
 }
 
-static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
+static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
 {
 	if (fchan->irq != NO_IRQ)
 		free_irq(fchan->irq, fchan);
@@ -1176,15 +1180,15 @@ static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 	kfree(fchan);
 }
 
-static int __devinit of_fsl_dma_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	int err;
-	struct fsl_dma_device *fdev;
+	struct fsldma_device *fdev;
 	struct device_node *child;
 	struct resource res;
 
-	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 	if (!fdev) {
 		dev_err(&dev->dev, "No enough memory for 'priv'\n");
 		return -ENOMEM;
@@ -1256,9 +1260,9 @@ err_no_reg:
 	return err;
 }
 
-static int of_fsl_dma_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *of_dev)
 {
-	struct fsl_dma_device *fdev;
+	struct fsldma_device *fdev;
 	unsigned int i;
 
 	fdev = dev_get_drvdata(&of_dev->dev);
@@ -1280,39 +1284,43 @@ static int of_fsl_dma_remove(struct of_device *of_dev)
 	return 0;
 }
 
-static struct of_device_id of_fsl_dma_ids[] = {
+static struct of_device_id fsldma_of_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
 	{}
 };
 
-static struct of_platform_driver of_fsl_dma_driver = {
-	.name = "fsl-elo-dma",
-	.match_table = of_fsl_dma_ids,
-	.probe = of_fsl_dma_probe,
-	.remove = of_fsl_dma_remove,
+static struct of_platform_driver fsldma_of_driver = {
+	.name		= "fsl-elo-dma",
+	.match_table	= fsldma_of_ids,
+	.probe		= fsldma_of_probe,
+	.remove		= fsldma_of_remove,
 };
 
-static __init int of_fsl_dma_init(void)
+/*----------------------------------------------------------------------------*/
+/* Module Init / Exit                                                         */
+/*----------------------------------------------------------------------------*/
+
+static __init int fsldma_init(void)
 {
 	int ret;
 
 	pr_info("Freescale Elo / Elo Plus DMA driver\n");
 
-	ret = of_register_platform_driver(&of_fsl_dma_driver);
+	ret = of_register_platform_driver(&fsldma_of_driver);
 	if (ret)
 		pr_err("fsldma: failed to register platform driver\n");
 
 	return ret;
 }
 
-static void __exit of_fsl_dma_exit(void)
+static void __exit fsldma_exit(void)
 {
-	of_unregister_platform_driver(&of_fsl_dma_driver);
+	of_unregister_platform_driver(&fsldma_of_driver);
 }
 
-subsys_initcall(of_fsl_dma_init);
-module_exit(of_fsl_dma_exit);
+subsys_initcall(fsldma_init);
+module_exit(fsldma_exit);
 
 MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index dbb5b5c..f8c2baa 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -94,7 +94,7 @@ struct fsl_desc_sw {
 	struct dma_async_tx_descriptor async_tx;
 } __attribute__((aligned(32)));
 
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
 	u32 mr;	/* 0x00 - Mode Register */
 	u32 sr;	/* 0x04 - Status Register */
 	u64 cdar;	/* 0x08 - Current descriptor address register */
@@ -104,19 +104,19 @@ struct fsl_dma_chan_regs {
 	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
 };
 
-struct fsl_dma_chan;
+struct fsldma_chan;
 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
 
-struct fsl_dma_device {
+struct fsldma_device {
 	void __iomem *reg_base;	/* DGSR register base */
 	struct device *dev;
 	struct dma_device common;
-	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
 	int irq;		/* Channel IRQ */
 };
 
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
 #define FSL_DMA_LITTLE_ENDIAN	0x00000000
 #define FSL_DMA_BIG_ENDIAN	0x00000001
 
@@ -127,8 +127,8 @@ struct fsl_dma_device {
 #define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
-struct fsl_dma_chan {
-	struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+	struct fsldma_chan_regs __iomem *reg_base;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
 	struct list_head ld_queue;	/* Link descriptors queue */
@@ -140,14 +140,14 @@ struct fsl_dma_chan {
 	struct tasklet_struct tasklet;
 	u32 feature;
 
-	void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
-	void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
-	void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
 };
 
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
 
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 4/8] fsldma: rename dest to dst for uniformity
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (2 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 3/8] fsldma: rename struct fsl_dma_chan to struct fsldma_chan Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 5/8] fsldma: clean up the OF subsystem routines Ira W. Snyder
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

Most functions in the standard library use "dst" as a parameter, rather
than "dest". This renames all uses of "dest" to "dst" to match the usual
convention.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |   32 ++++++++++++++++----------------
 drivers/dma/fsldma.h |    2 +-
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6795d96..c2db754 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -90,14 +90,14 @@ static void set_desc_src(struct fsldma_chan *fsl_chan,
 	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
 }
 
-static void set_desc_dest(struct fsldma_chan *fsl_chan,
-				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+static void set_desc_dst(struct fsldma_chan *fsl_chan,
+				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
 	u64 snoop_bits;
 
 	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64);
 }
 
 static void set_desc_next(struct fsldma_chan *fsl_chan,
@@ -253,7 +253,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 }
 
 /**
- * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
+ * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
  * @fsl_chan : Freescale DMA channel
  * @size     : Address loop size, 0 for disable loop
  *
@@ -263,7 +263,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
  * TA + 1 ... and so on.
  */
-static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
@@ -486,7 +486,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 }
 
 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
 {
 	struct fsldma_chan *fsl_chan;
@@ -519,7 +519,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 
 		set_desc_cnt(fsl_chan, &new->hw, copy);
 		set_desc_src(fsl_chan, &new->hw, dma_src);
-		set_desc_dest(fsl_chan, &new->hw, dma_dest);
+		set_desc_dst(fsl_chan, &new->hw, dma_dst);
 
 		if (!first)
 			first = new;
@@ -532,7 +532,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		prev = new;
 		len -= copy;
 		dma_src += copy;
-		dma_dest += copy;
+		dma_dst += copy;
 
 		/* Insert the link descriptor to the LD ring */
 		list_add_tail(&new->node, &first->tx_list);
@@ -680,7 +680,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 			/* Fill in the descriptor */
 			set_desc_cnt(fsl_chan, &new->hw, copy);
 			set_desc_src(fsl_chan, &new->hw, dma_src);
-			set_desc_dest(fsl_chan, &new->hw, dma_dst);
+			set_desc_dst(fsl_chan, &new->hw, dma_dst);
 
 			/*
 			 * If this is not the first descriptor, chain the
@@ -721,8 +721,8 @@ finished:
 	if (fsl_chan->set_src_loop_size)
 		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
 
-	if (fsl_chan->set_dest_loop_size)
-		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+	if (fsl_chan->set_dst_loop_size)
+		fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size);
 
 	if (fsl_chan->toggle_ext_start)
 		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
@@ -867,7 +867,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
 {
 	struct list_head *ld_node;
-	dma_addr_t next_dest_addr;
+	dma_addr_t next_dst_addr;
 	unsigned long flags;
 
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
@@ -892,10 +892,10 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
 
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
-		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
+		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
 		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
-				(unsigned long long)next_dest_addr);
-		set_cdar(fsl_chan, next_dest_addr);
+				(unsigned long long)next_dst_addr);
+		set_cdar(fsl_chan, next_dst_addr);
 		dma_start(fsl_chan);
 	} else {
 		set_cdar(fsl_chan, 0);
@@ -1130,7 +1130,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	case FSL_DMA_IP_83XX:
 		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
-		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+		new_fsl_chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
 		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
 	}
 
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index f8c2baa..a67b8e3 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -143,7 +143,7 @@ struct fsldma_chan {
 	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
 	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
 	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
-	void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size);
+	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
 	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
 };
 
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 5/8] fsldma: clean up the OF subsystem routines
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (3 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 4/8] fsldma: rename dest to dst for uniformity Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-01  6:10 ` [PATCH 6/8] fsldma: simplify IRQ probing and handling Ira W. Snyder
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

This fixes some errors in the cleanup paths of the OF subsystem, including
missing checks for ioremap failing. Also, some variables were renamed for
brevity.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |  259 +++++++++++++++++++++++++------------------------
 drivers/dma/fsldma.h |    4 +-
 2 files changed, 134 insertions(+), 129 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index c2db754..507b297 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -40,7 +40,7 @@
 static void dma_init(struct fsldma_chan *fsl_chan)
 {
 	/* Reset the channel */
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32);
 
 	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
@@ -49,7 +49,7 @@ static void dma_init(struct fsldma_chan *fsl_chan)
 		 * EOSIE - End of segments interrupt enable (basic mode)
 		 * EOLNIE - End of links interrupt enable
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+		DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE
 				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
 		break;
 	case FSL_DMA_IP_83XX:
@@ -57,7 +57,7 @@ static void dma_init(struct fsldma_chan *fsl_chan)
 		 * EOTIE - End-of-transfer interrupt enable
 		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+		DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE
 				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
@@ -66,12 +66,12 @@ static void dma_init(struct fsldma_chan *fsl_chan)
 
 static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32);
 }
 
 static u32 get_sr(struct fsldma_chan *fsl_chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+	return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32);
 }
 
 static void set_desc_cnt(struct fsldma_chan *fsl_chan,
@@ -112,27 +112,27 @@ static void set_desc_next(struct fsldma_chan *fsl_chan,
 
 static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
 static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+	return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
 static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64);
 }
 
 static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+	return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64);
 }
 
 static u32 get_bcr(struct fsldma_chan *fsl_chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+	return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32);
 }
 
 static int dma_is_idle(struct fsldma_chan *fsl_chan)
@@ -145,11 +145,11 @@ static void dma_start(struct fsldma_chan *fsl_chan)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
 
 	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
 		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-			DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
+			DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32);
 			mode |= FSL_DMA_MR_EMP_EN;
 		} else {
 			mode &= ~FSL_DMA_MR_EMP_EN;
@@ -161,7 +161,7 @@ static void dma_start(struct fsldma_chan *fsl_chan)
 	else
 		mode |= FSL_DMA_MR_CS;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 }
 
 static void dma_halt(struct fsldma_chan *fsl_chan)
@@ -169,12 +169,12 @@ static void dma_halt(struct fsldma_chan *fsl_chan)
 	u32 mode;
 	int i;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
 	mode |= FSL_DMA_MR_CA;
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 
 	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(fsl_chan))
@@ -235,7 +235,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
 
 	switch (size) {
 	case 0:
@@ -249,7 +249,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 		break;
 	}
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 }
 
 /**
@@ -267,7 +267,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
 
 	switch (size) {
 	case 0:
@@ -281,7 +281,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
 		break;
 	}
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 }
 
 /**
@@ -302,10 +302,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
 
 	BUG_ON(size > 1024);
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
 	mode |= (__ilog2(size) << 24) & 0x0f000000;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
 }
 
 /**
@@ -967,7 +967,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
-static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+static irqreturn_t fsldma_chan_irq(int irq, void *data)
 {
 	struct fsldma_chan *fsl_chan = data;
 	u32 stat;
@@ -1048,17 +1048,17 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+static irqreturn_t fsldma_irq(int irq, void *data)
 {
 	struct fsldma_device *fdev = data;
 	int ch_nr;
 	u32 gsr;
 
-	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
-			: in_le32(fdev->reg_base);
+	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
+			: in_le32(fdev->regs);
 	ch_nr = (32 - ffs(gsr)) / 8;
 
-	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
+	return fdev->chan[ch_nr] ? fsldma_chan_irq(irq,
 			fdev->chan[ch_nr]) : IRQ_NONE;
 }
 
@@ -1075,140 +1075,142 @@ static void dma_do_tasklet(unsigned long data)
 static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsldma_chan *new_fsl_chan;
+	struct fsldma_chan *fchan;
 	struct resource res;
 	int err;
 
 	/* alloc channel */
-	new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
-	if (!new_fsl_chan) {
-		dev_err(fdev->dev, "No free memory for allocating "
-				"dma channels!\n");
-		return -ENOMEM;
+	fchan = kzalloc(sizeof(*fchan), GFP_KERNEL);
+	if (!fchan) {
+		dev_err(fdev->dev, "no free memory for DMA channels!\n");
+		err = -ENOMEM;
+		goto out_return;
+	}
+
+	/* ioremap registers for use */
+	fchan->regs = of_iomap(node, 0);
+	if (!fchan->regs) {
+		dev_err(fdev->dev, "unable to ioremap registers\n");
+		err = -ENOMEM;
+		goto out_free_fchan;
 	}
 
-	/* get dma channel register base */
 	err = of_address_to_resource(node, 0, &res);
 	if (err) {
-		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
-				node->full_name);
-		goto err_no_reg;
+		dev_err(fdev->dev, "unable to find 'reg' property\n");
+		goto out_iounmap_regs;
 	}
 
-	new_fsl_chan->feature = feature;
-
+	fchan->feature = feature;
 	if (!fdev->feature)
-		fdev->feature = new_fsl_chan->feature;
+		fdev->feature = fchan->feature;
 
-	/* If the DMA device's feature is different than its channels',
-	 * report the bug.
+	/*
+	 * If the DMA device's feature is different than the feature
+	 * of its channels, report the bug
 	 */
-	WARN_ON(fdev->feature != new_fsl_chan->feature);
-
-	new_fsl_chan->dev = fdev->dev;
-	new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res));
-	new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7;
-	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
-		dev_err(fdev->dev, "There is no %d channel!\n",
-				new_fsl_chan->id);
+	WARN_ON(fdev->feature != fchan->feature);
+
+	fchan->dev = fdev->dev;
+	fchan->id = ((res.start - 0x100) & 0xfff) >> 7;
+	if (fchan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
+		dev_err(fdev->dev, "too many channels for device\n");
 		err = -EINVAL;
-		goto err_no_chan;
+		goto out_iounmap_regs;
 	}
-	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
-	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
-			(unsigned long)new_fsl_chan);
 
-	/* Init the channel */
-	dma_init(new_fsl_chan);
+	fdev->chan[fchan->id] = fchan;
+	tasklet_init(&fchan->tasklet, dma_do_tasklet, (unsigned long)fchan);
+
+	/* Initialize the channel */
+	dma_init(fchan);
 
 	/* Clear cdar registers */
-	set_cdar(new_fsl_chan, 0);
+	set_cdar(fchan, 0);
 
-	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+	switch (fchan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
-		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+		fchan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
 	case FSL_DMA_IP_83XX:
-		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
-		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
-		new_fsl_chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
-		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
+		fchan->toggle_ext_start = fsl_chan_toggle_ext_start;
+		fchan->set_src_loop_size = fsl_chan_set_src_loop_size;
+		fchan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
+		fchan->set_request_count = fsl_chan_set_request_count;
 	}
 
-	spin_lock_init(&new_fsl_chan->desc_lock);
-	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+	spin_lock_init(&fchan->desc_lock);
+	INIT_LIST_HEAD(&fchan->ld_queue);
 
-	new_fsl_chan->common.device = &fdev->common;
+	fchan->common.device = &fdev->common;
 
 	/* Add the channel to DMA device channel list */
-	list_add_tail(&new_fsl_chan->common.device_node,
-			&fdev->common.channels);
+	list_add_tail(&fchan->common.device_node, &fdev->common.channels);
 	fdev->common.chancnt++;
 
-	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
-	if (new_fsl_chan->irq != NO_IRQ) {
-		err = request_irq(new_fsl_chan->irq,
-					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
-					"fsldma-channel", new_fsl_chan);
+	fchan->irq = irq_of_parse_and_map(node, 0);
+	if (fchan->irq != NO_IRQ) {
+		err = request_irq(fchan->irq, &fsldma_chan_irq,
+				  IRQF_SHARED, "fsldma-channel", fchan);
 		if (err) {
-			dev_err(fdev->dev, "DMA channel %s request_irq error "
-				"with return %d\n", node->full_name, err);
-			goto err_no_irq;
+			dev_err(fdev->dev, "unable to request IRQ "
+					   "for channel %d\n", fchan->id);
+			goto out_list_del;
 		}
 	}
 
-	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-		 compatible,
-		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
+	dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible,
+		 fchan->irq != NO_IRQ ? fchan->irq : fdev->irq);
 
 	return 0;
 
-err_no_irq:
-	list_del(&new_fsl_chan->common.device_node);
-err_no_chan:
-	iounmap(new_fsl_chan->reg_base);
-err_no_reg:
-	kfree(new_fsl_chan);
+out_list_del:
+	irq_dispose_mapping(fchan->irq);
+	list_del_init(&fchan->common.device_node);
+out_iounmap_regs:
+	iounmap(fchan->regs);
+out_free_fchan:
+	kfree(fchan);
+out_return:
 	return err;
 }
 
 static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
 {
-	if (fchan->irq != NO_IRQ)
+	if (fchan->irq != NO_IRQ) {
 		free_irq(fchan->irq, fchan);
+		irq_dispose_mapping(fchan->irq);
+	}
+
 	list_del(&fchan->common.device_node);
-	iounmap(fchan->reg_base);
+	iounmap(fchan->regs);
 	kfree(fchan);
 }
 
-static int __devinit fsldma_of_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *op,
 			const struct of_device_id *match)
 {
-	int err;
 	struct fsldma_device *fdev;
 	struct device_node *child;
-	struct resource res;
+	int err;
 
 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 	if (!fdev) {
-		dev_err(&dev->dev, "No enough memory for 'priv'\n");
-		return -ENOMEM;
+		dev_err(&op->dev, "No enough memory for 'priv'\n");
+		err = -ENOMEM;
+		goto out_return;
 	}
-	fdev->dev = &dev->dev;
+
+	fdev->dev = &op->dev;
 	INIT_LIST_HEAD(&fdev->common.channels);
 
-	/* get DMA controller register base */
-	err = of_address_to_resource(dev->node, 0, &res);
-	if (err) {
-		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
-				dev->node->full_name);
-		goto err_no_reg;
+	/* ioremap the registers for use */
+	fdev->regs = of_iomap(op->node, 0);
+	if (!fdev->regs) {
+		dev_err(&op->dev, "unable to ioremap registers\n");
+		err = -ENOMEM;
+		goto out_free_fdev;
 	}
 
-	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at 0x%llx...\n",
-			match->compatible, (unsigned long long)res.start);
-	fdev->reg_base = ioremap(res.start, resource_size(&res));
-
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
@@ -1220,66 +1222,69 @@ static int __devinit fsldma_of_probe(struct of_device *dev,
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
 	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
-	fdev->common.dev = &dev->dev;
+	fdev->common.dev = &op->dev;
 
-	fdev->irq = irq_of_parse_and_map(dev->node, 0);
+	fdev->irq = irq_of_parse_and_map(op->node, 0);
 	if (fdev->irq != NO_IRQ) {
-		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
-					"fsldma-device", fdev);
+		err = request_irq(fdev->irq, &fsldma_irq, IRQF_SHARED,
+				  "fsldma-device", fdev);
 		if (err) {
-			dev_err(&dev->dev, "DMA device request_irq error "
-				"with return %d\n", err);
-			goto err;
+			dev_err(&op->dev, "unable to request IRQ\n");
+			goto out_iounmap_regs;
 		}
 	}
 
-	dev_set_drvdata(&(dev->dev), fdev);
+	dev_set_drvdata(&op->dev, fdev);
 
-	/* We cannot use of_platform_bus_probe() because there is no
-	 * of_platform_bus_remove.  Instead, we manually instantiate every DMA
+	/*
+	 * We cannot use of_platform_bus_probe() because there is no
+	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
 	 * channel object.
 	 */
-	for_each_child_of_node(dev->node, child) {
-		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+	for_each_child_of_node(op->node, child) {
+		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
 			fsl_dma_chan_probe(fdev, child,
 				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
 				"fsl,eloplus-dma-channel");
-		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+		}
+
+		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
 			fsl_dma_chan_probe(fdev, child,
 				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
 				"fsl,elo-dma-channel");
+		}
 	}
 
 	dma_async_device_register(&fdev->common);
 	return 0;
 
-err:
-	iounmap(fdev->reg_base);
-err_no_reg:
+out_iounmap_regs:
+	iounmap(fdev->regs);
+out_free_fdev:
 	kfree(fdev);
+out_return:
 	return err;
 }
 
-static int fsldma_of_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *op)
 {
 	struct fsldma_device *fdev;
 	unsigned int i;
 
-	fdev = dev_get_drvdata(&of_dev->dev);
-
+	fdev = dev_get_drvdata(&op->dev);
 	dma_async_device_unregister(&fdev->common);
 
-	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
 		if (fdev->chan[i])
 			fsl_dma_chan_remove(fdev->chan[i]);
+	}
 
 	if (fdev->irq != NO_IRQ)
 		free_irq(fdev->irq, fdev);
 
-	iounmap(fdev->reg_base);
-
+	iounmap(fdev->regs);
+	dev_set_drvdata(&op->dev, NULL);
 	kfree(fdev);
-	dev_set_drvdata(&of_dev->dev, NULL);
 
 	return 0;
 }
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index a67b8e3..ea3b19c 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -108,7 +108,7 @@ struct fsldma_chan;
 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
 
 struct fsldma_device {
-	void __iomem *reg_base;	/* DGSR register base */
+	void __iomem *regs;	/* DGSR register base */
 	struct device *dev;
 	struct dma_device common;
 	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
@@ -128,7 +128,7 @@ struct fsldma_device {
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
 struct fsldma_chan {
-	struct fsldma_chan_regs __iomem *reg_base;
+	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
 	struct list_head ld_queue;	/* Link descriptors queue */
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 6/8] fsldma: simplify IRQ probing and handling
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (4 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 5/8] fsldma: clean up the OF subsystem routines Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-06 18:02   ` Scott Wood
  2010-01-01  6:10 ` [PATCH 7/8] fsldma: rename fsl_chan to fchan Ira W. Snyder
                   ` (3 subsequent siblings)
  9 siblings, 1 reply; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

The IRQ probing is needlessly complex. All of the 83xx device trees in
arch/powerpc/boot/dts/ specify 5 interrupts per DMA controller: one for the
controller, and one for each channel. These interrupts are all attached to
the same IRQ line.

This causes an interesting situation if two channels interrupt at the same
time. The controller's handler will handle the first channel, and the
channel handler will handle the remaining channels. Instead of this, just
let the channel handler handle all channels, and ignore the controller
handler completely.

The same can be accomplished on 83xx by removing the controller's interrupt
property from the device tree. Testing has not shown any problems with this
configuration. All in-tree device trees already have an interrupt property
specified for each channel.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +++++---
 drivers/dma/fsldma.c                           |   49 +++++++-----------------
 2 files changed, 25 insertions(+), 41 deletions(-)

diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
index 0732cdd..d5d2d3d 100644
--- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
@@ -12,6 +12,9 @@ Required properties:
 - ranges		: Should be defined as specified in 1) to describe the
 		  DMA controller channels.
 - cell-index        : controller index.  0 for controller @ 0x8100
+
+Optional properties:
+
 - interrupts        : <interrupt mapping for DMA IRQ>
 - interrupt-parent  : optional, if needed for interrupt mapping
 
@@ -23,11 +26,7 @@ Required properties:
 			 "fsl,elo-dma-channel". However, see note below.
         - reg               : <registers mapping for channel>
         - cell-index        : dma channel index starts at 0.
-
-Optional properties:
         - interrupts        : <interrupt mapping for DMA channel IRQ>
-			  (on 83xx this is expected to be identical to
-			   the interrupts property of the parent node)
         - interrupt-parent  : optional, if needed for interrupt mapping
 
 Example:
@@ -37,28 +36,34 @@ Example:
 		compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
 		reg = <0x82a8 4>;
 		ranges = <0 0x8100 0x1a4>;
-		interrupt-parent = <&ipic>;
-		interrupts = <71 8>;
 		cell-index = <0>;
 		dma-channel@0 {
 			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 			cell-index = <0>;
 			reg = <0 0x80>;
+			interrupt-parent = <&ipic>;
+			interrupts = <71 8>;
 		};
 		dma-channel@80 {
 			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 			cell-index = <1>;
 			reg = <0x80 0x80>;
+			interrupt-parent = <&ipic>;
+			interrupts = <71 8>;
 		};
 		dma-channel@100 {
 			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 			cell-index = <2>;
 			reg = <0x100 0x80>;
+			interrupt-parent = <&ipic>;
+			interrupts = <71 8>;
 		};
 		dma-channel@180 {
 			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 			cell-index = <3>;
 			reg = <0x180 0x80>;
+			interrupt-parent = <&ipic>;
+			interrupts = <71 8>;
 		};
 	};
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 507b297..d8cc05b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1048,20 +1048,6 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t fsldma_irq(int irq, void *data)
-{
-	struct fsldma_device *fdev = data;
-	int ch_nr;
-	u32 gsr;
-
-	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
-			: in_le32(fdev->regs);
-	ch_nr = (32 - ffs(gsr)) / 8;
-
-	return fdev->chan[ch_nr] ? fsldma_chan_irq(irq,
-			fdev->chan[ch_nr]) : IRQ_NONE;
-}
-
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
@@ -1143,19 +1129,24 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 
 	fchan->common.device = &fdev->common;
 
+	/* find the IRQ line */
+	fchan->irq = irq_of_parse_and_map(node, 0);
+	if (fchan->irq == NO_IRQ) {
+		dev_err(fdev->dev, "unable to find IRQ line\n");
+		err = -EINVAL;
+		goto out_iounmap_regs;
+	}
+
 	/* Add the channel to DMA device channel list */
 	list_add_tail(&fchan->common.device_node, &fdev->common.channels);
 	fdev->common.chancnt++;
 
-	fchan->irq = irq_of_parse_and_map(node, 0);
-	if (fchan->irq != NO_IRQ) {
-		err = request_irq(fchan->irq, &fsldma_chan_irq,
-				  IRQF_SHARED, "fsldma-channel", fchan);
-		if (err) {
-			dev_err(fdev->dev, "unable to request IRQ "
-					   "for channel %d\n", fchan->id);
-			goto out_list_del;
-		}
+	err = request_irq(fchan->irq, fsldma_chan_irq, IRQF_SHARED,
+			  "fsldma-channel", fchan);
+	if (err) {
+		dev_err(fdev->dev, "unable to request IRQ for channel %d\n",
+			fchan->id);
+		goto out_list_del;
 	}
 
 	dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible,
@@ -1224,16 +1215,6 @@ static int __devinit fsldma_of_probe(struct of_device *op,
 	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
 	fdev->common.dev = &op->dev;
 
-	fdev->irq = irq_of_parse_and_map(op->node, 0);
-	if (fdev->irq != NO_IRQ) {
-		err = request_irq(fdev->irq, &fsldma_irq, IRQF_SHARED,
-				  "fsldma-device", fdev);
-		if (err) {
-			dev_err(&op->dev, "unable to request IRQ\n");
-			goto out_iounmap_regs;
-		}
-	}
-
 	dev_set_drvdata(&op->dev, fdev);
 
 	/*
@@ -1258,8 +1239,6 @@ static int __devinit fsldma_of_probe(struct of_device *op,
 	dma_async_device_register(&fdev->common);
 	return 0;
 
-out_iounmap_regs:
-	iounmap(fdev->regs);
 out_free_fdev:
 	kfree(fdev);
 out_return:
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 7/8] fsldma: rename fsl_chan to fchan
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (5 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 6/8] fsldma: simplify IRQ probing and handling Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-06 18:04   ` Scott Wood
  2010-01-01  6:10 ` [PATCH 8/8] fsldma: major cleanups and fixes Ira W. Snyder
                   ` (2 subsequent siblings)
  9 siblings, 1 reply; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

The name fsl_chan seems too long, so it has been shortened to fchan.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |  446 +++++++++++++++++++++++++-------------------------
 1 files changed, 223 insertions(+), 223 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index d8cc05b..f65b28b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"
 
-static void dma_init(struct fsldma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *fchan)
 {
 	/* Reset the channel */
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, 0, 32);
 
-	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+	switch (fchan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
 		/* Set the channel to below modes:
 		 * EIE - Error interrupt enable
 		 * EOSIE - End of segments interrupt enable (basic mode)
 		 * EOLNIE - End of links interrupt enable
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE
+		DMA_OUT(fchan, &fchan->regs->mr, FSL_DMA_MR_EIE
 				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
 		break;
 	case FSL_DMA_IP_83XX:
@@ -57,154 +57,154 @@ static void dma_init(struct fsldma_chan *fsl_chan)
 		 * EOTIE - End-of-transfer interrupt enable
 		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE
+		DMA_OUT(fchan, &fchan->regs->mr, FSL_DMA_MR_EOTIE
 				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
 
 }
 
-static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *fchan, u32 val)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32);
+	DMA_OUT(fchan, &fchan->regs->sr, val, 32);
 }
 
-static u32 get_sr(struct fsldma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *fchan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32);
+	return DMA_IN(fchan, &fchan->regs->sr, 32);
 }
 
-static void set_desc_cnt(struct fsldma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *fchan,
 				struct fsl_dma_ld_hw *hw, u32 count)
 {
-	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+	hw->count = CPU_TO_DMA(fchan, count, 32);
 }
 
-static void set_desc_src(struct fsldma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *fchan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
 	u64 snoop_bits;
 
-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+	snoop_bits = ((fchan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+	hw->src_addr = CPU_TO_DMA(fchan, snoop_bits | src, 64);
 }
 
-static void set_desc_dst(struct fsldma_chan *fsl_chan,
+static void set_desc_dst(struct fsldma_chan *fchan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
 	u64 snoop_bits;
 
-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+	snoop_bits = ((fchan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64);
+	hw->dst_addr = CPU_TO_DMA(fchan, snoop_bits | dst, 64);
 }
 
-static void set_desc_next(struct fsldma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *fchan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
 	u64 snoop_bits;
 
-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+	snoop_bits = ((fchan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 		? FSL_DMA_SNEN : 0;
-	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
+	hw->next_ln_addr = CPU_TO_DMA(fchan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
+static void set_cdar(struct fsldma_chan *fchan, dma_addr_t addr)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+	DMA_OUT(fchan, &fchan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
-static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
+static dma_addr_t get_cdar(struct fsldma_chan *fchan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+	return DMA_IN(fchan, &fchan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
+static void set_ndar(struct fsldma_chan *fchan, dma_addr_t addr)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64);
+	DMA_OUT(fchan, &fchan->regs->ndar, addr, 64);
 }
 
-static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *fchan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64);
+	return DMA_IN(fchan, &fchan->regs->ndar, 64);
 }
 
-static u32 get_bcr(struct fsldma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *fchan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32);
+	return DMA_IN(fchan, &fchan->regs->bcr, 32);
 }
 
-static int dma_is_idle(struct fsldma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *fchan)
 {
-	u32 sr = get_sr(fsl_chan);
+	u32 sr = get_sr(fchan);
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
-static void dma_start(struct fsldma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *fchan)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+	mode = DMA_IN(fchan, &fchan->regs->mr, 32);
 
-	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-			DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32);
+	if ((fchan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		if (fchan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+			DMA_OUT(fchan, &fchan->regs->bcr, 0, 32);
 			mode |= FSL_DMA_MR_EMP_EN;
 		} else {
 			mode &= ~FSL_DMA_MR_EMP_EN;
 		}
 	}
 
-	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
+	if (fchan->feature & FSL_DMA_CHAN_START_EXT)
 		mode |= FSL_DMA_MR_EMS_EN;
 	else
 		mode |= FSL_DMA_MR_CS;
 
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 }
 
-static void dma_halt(struct fsldma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *fchan)
 {
 	u32 mode;
 	int i;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+	mode = DMA_IN(fchan, &fchan->regs->mr, 32);
 	mode |= FSL_DMA_MR_CA;
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 
 	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 
 	for (i = 0; i < 100; i++) {
-		if (dma_is_idle(fsl_chan))
+		if (dma_is_idle(fchan))
 			break;
 		udelay(10);
 	}
 
-	if (i >= 100 && !dma_is_idle(fsl_chan))
-		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+	if (i >= 100 && !dma_is_idle(fchan))
+		dev_err(fchan->dev, "DMA halt timeout!\n");
 }
 
-static void set_ld_eol(struct fsldma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *fchan,
 			struct fsl_desc_sw *desc)
 {
 	u64 snoop_bits;
 
-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+	snoop_bits = ((fchan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 		? FSL_DMA_SNEN : 0;
 
-	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+	desc->hw.next_ln_addr = CPU_TO_DMA(fchan,
+		DMA_TO_CPU(fchan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
 			| snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsldma_chan *fsl_chan,
+static void append_ld_queue(struct fsldma_chan *fchan,
 		struct fsl_desc_sw *new_desc)
 {
-	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
+	struct fsl_desc_sw *queue_tail = to_fsl_desc(fchan->ld_queue.prev);
 
-	if (list_empty(&fsl_chan->ld_queue))
+	if (list_empty(&fchan->ld_queue))
 		return;
 
 	/* Link to the new descriptor physical address and
@@ -214,15 +214,15 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan,
 	 *
 	 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
 	 */
-	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fchan,
 			new_desc->async_tx.phys | FSL_DMA_EOSIE |
-			(((fsl_chan->feature & FSL_DMA_IP_MASK)
+			(((fchan->feature & FSL_DMA_IP_MASK)
 				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
 }
 
 /**
  * fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  * @size     : Address loop size, 0 for disable loop
  *
  * The set source address hold transfer size. The source
@@ -231,11 +231,11 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan,
  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
  * SA + 1 ... and so on.
  */
-static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *fchan, int size)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+	mode = DMA_IN(fchan, &fchan->regs->mr, 32);
 
 	switch (size) {
 	case 0:
@@ -249,12 +249,12 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
 		break;
 	}
 
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  * @size     : Address loop size, 0 for disable loop
  *
  * The set destination address hold transfer size. The destination
@@ -263,11 +263,11 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
  * TA + 1 ... and so on.
  */
-static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fchan, int size)
 {
 	u32 mode;
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+	mode = DMA_IN(fchan, &fchan->regs->mr, 32);
 
 	switch (size) {
 	case 0:
@@ -281,12 +281,12 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
 		break;
 	}
 
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  * @size     : Number of bytes to transfer in a single request
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -296,38 +296,38 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
  *
  * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *fchan, int size)
 {
 	u32 mode;
 
 	BUG_ON(size > 1024);
 
-	mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+	mode = DMA_IN(fchan, &fchan->regs->mr, 32);
 	mode |= (__ilog2(size) << 24) & 0x0f000000;
 
-	DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+	DMA_OUT(fchan, &fchan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  * @enable   : 0 is disabled, 1 is enabled.
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
  * The DMA Request Count feature should be used in addition to this feature
  * to set the number of bytes to transfer before pausing the channel.
  */
-static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fchan, int enable)
 {
 	if (enable)
-		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+		fchan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
 	else
-		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+		fchan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 }
 
 /**
  * fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  * @enable   : 0 is disabled, 1 is enabled.
  *
  * If enable the external start, the channel can be started by an
@@ -335,26 +335,26 @@ static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
  * transfer immediately. The DMA channel will wait for the
  * control pin asserted.
  */
-static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *fchan, int enable)
 {
 	if (enable)
-		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+		fchan->feature |= FSL_DMA_CHAN_START_EXT;
 	else
-		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+		fchan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsldma_chan *fchan = to_fsl_chan(tx->chan);
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
 	/* cookie increment and adding to ld_queue must be atomic */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-	cookie = fsl_chan->common.cookie;
+	cookie = fchan->common.cookie;
 	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
 		if (cookie < 0)
@@ -363,33 +363,33 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 		desc->async_tx.cookie = cookie;
 	}
 
-	fsl_chan->common.cookie = cookie;
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
+	fchan->common.cookie = cookie;
+	append_ld_queue(fchan, desc);
+	list_splice_init(&desc->tx_list, fchan->ld_queue.prev);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 
 	return cookie;
 }
 
 /**
  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  *
  * Return - The descriptor allocated. NULL for failed.
  */
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-					struct fsldma_chan *fsl_chan)
+					struct fsldma_chan *fchan)
 {
 	dma_addr_t pdesc;
 	struct fsl_desc_sw *desc_sw;
 
-	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
+	desc_sw = dma_pool_alloc(fchan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (desc_sw) {
 		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
 		INIT_LIST_HEAD(&desc_sw->tx_list);
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
-						&fsl_chan->common);
+						&fchan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
 		desc_sw->async_tx.phys = pdesc;
 	}
@@ -400,7 +400,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 
 /**
  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  *
  * This function will create a dma pool for descriptor allocation.
  *
@@ -408,21 +408,21 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  */
 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
-	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fchan = to_fsl_chan(chan);
 
 	/* Has this channel already been allocated? */
-	if (fsl_chan->desc_pool)
+	if (fchan->desc_pool)
 		return 1;
 
 	/* We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
 	 */
-	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-			fsl_chan->dev, sizeof(struct fsl_desc_sw),
+	fchan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+			fchan->dev, sizeof(struct fsl_desc_sw),
 			32, 0);
-	if (!fsl_chan->desc_pool) {
-		dev_err(fsl_chan->dev, "No memory for channel %d "
-			"descriptor dma pool.\n", fsl_chan->id);
+	if (!fchan->desc_pool) {
+		dev_err(fchan->dev, "No memory for channel %d "
+			"descriptor dma pool.\n", fchan->id);
 		return 0;
 	}
 
@@ -431,45 +431,45 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 
 /**
  * fsl_dma_free_chan_resources - Free all resources of the channel.
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  */
 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 {
-	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fchan = to_fsl_chan(chan);
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
-	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
-	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+	dev_dbg(fchan->dev, "Free all channel resources.\n");
+	spin_lock_irqsave(&fchan->desc_lock, flags);
+	list_for_each_entry_safe(desc, _desc, &fchan->ld_queue, node) {
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(fsl_chan->dev,
+		dev_dbg(fchan->dev,
 				"LD %p will be released.\n", desc);
 #endif
 		list_del(&desc->node);
 		/* free link descriptor */
-		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
 	}
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-	dma_pool_destroy(fsl_chan->desc_pool);
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
+	dma_pool_destroy(fchan->desc_pool);
 
-	fsl_chan->desc_pool = NULL;
+	fchan->desc_pool = NULL;
 }
 
 static struct dma_async_tx_descriptor *
 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 {
-	struct fsldma_chan *fsl_chan;
+	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *new;
 
 	if (!chan)
 		return NULL;
 
-	fsl_chan = to_fsl_chan(chan);
+	fchan = to_fsl_chan(chan);
 
-	new = fsl_dma_alloc_descriptor(fsl_chan);
+	new = fsl_dma_alloc_descriptor(fchan);
 	if (!new) {
-		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+		dev_err(fchan->dev, "No free memory for link descriptor\n");
 		return NULL;
 	}
 
@@ -480,7 +480,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 	list_add_tail(&new->node, &new->tx_list);
 
 	/* Set End-of-link to the last link descriptor of new list*/
-	set_ld_eol(fsl_chan, new);
+	set_ld_eol(fchan, new);
 
 	return &new->async_tx;
 }
@@ -489,7 +489,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
 	size_t len, unsigned long flags)
 {
-	struct fsldma_chan *fsl_chan;
+	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
 	struct list_head *list;
 	size_t copy;
@@ -500,31 +500,31 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	if (!len)
 		return NULL;
 
-	fsl_chan = to_fsl_chan(chan);
+	fchan = to_fsl_chan(chan);
 
 	do {
 
 		/* Allocate the link descriptor from DMA pool */
-		new = fsl_dma_alloc_descriptor(fsl_chan);
+		new = fsl_dma_alloc_descriptor(fchan);
 		if (!new) {
-			dev_err(fsl_chan->dev,
+			dev_err(fchan->dev,
 					"No free memory for link descriptor\n");
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+		dev_dbg(fchan->dev, "new link desc alloc %p\n", new);
 #endif
 
 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
-		set_desc_cnt(fsl_chan, &new->hw, copy);
-		set_desc_src(fsl_chan, &new->hw, dma_src);
-		set_desc_dst(fsl_chan, &new->hw, dma_dst);
+		set_desc_cnt(fchan, &new->hw, copy);
+		set_desc_src(fchan, &new->hw, dma_src);
+		set_desc_dst(fchan, &new->hw, dma_dst);
 
 		if (!first)
 			first = new;
 		else
-			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+			set_desc_next(fchan, &prev->hw, new->async_tx.phys);
 
 		new->async_tx.cookie = 0;
 		async_tx_ack(&new->async_tx);
@@ -542,7 +542,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	new->async_tx.cookie = -EBUSY;
 
 	/* Set End-of-link to the last link descriptor of new list*/
-	set_ld_eol(fsl_chan, new);
+	set_ld_eol(fchan, new);
 
 	return &first->async_tx;
 
@@ -553,7 +553,7 @@ fail:
 	list = &first->tx_list;
 	list_for_each_entry_safe_reverse(new, prev, list, node) {
 		list_del(&new->node);
-		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+		dma_pool_free(fchan->desc_pool, new, new->async_tx.phys);
 	}
 
 	return NULL;
@@ -575,7 +575,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
 	enum dma_data_direction direction, unsigned long flags)
 {
-	struct fsldma_chan *fsl_chan;
+	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
 	struct fsl_dma_slave *slave;
 	struct list_head *tx_list;
@@ -594,7 +594,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	if (!chan->private)
 		return NULL;
 
-	fsl_chan = to_fsl_chan(chan);
+	fchan = to_fsl_chan(chan);
 	slave = chan->private;
 
 	if (list_empty(&slave->addresses))
@@ -644,14 +644,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 			}
 
 			/* Allocate the link descriptor from DMA pool */
-			new = fsl_dma_alloc_descriptor(fsl_chan);
+			new = fsl_dma_alloc_descriptor(fchan);
 			if (!new) {
-				dev_err(fsl_chan->dev, "No free memory for "
+				dev_err(fchan->dev, "No free memory for "
 						       "link descriptor\n");
 				goto fail;
 			}
 #ifdef FSL_DMA_LD_DEBUG
-			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+			dev_dbg(fchan->dev, "new link desc alloc %p\n", new);
 #endif
 
 			/*
@@ -678,9 +678,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 			}
 
 			/* Fill in the descriptor */
-			set_desc_cnt(fsl_chan, &new->hw, copy);
-			set_desc_src(fsl_chan, &new->hw, dma_src);
-			set_desc_dst(fsl_chan, &new->hw, dma_dst);
+			set_desc_cnt(fchan, &new->hw, copy);
+			set_desc_src(fchan, &new->hw, dma_src);
+			set_desc_dst(fchan, &new->hw, dma_dst);
 
 			/*
 			 * If this is not the first descriptor, chain the
@@ -689,7 +689,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 			if (!first) {
 				first = new;
 			} else {
-				set_desc_next(fsl_chan, &prev->hw,
+				set_desc_next(fchan, &prev->hw,
 					      new->async_tx.phys);
 			}
 
@@ -715,23 +715,23 @@ finished:
 	new->async_tx.cookie = -EBUSY;
 
 	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(fsl_chan, new);
+	set_ld_eol(fchan, new);
 
 	/* Enable extra controller features */
-	if (fsl_chan->set_src_loop_size)
-		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+	if (fchan->set_src_loop_size)
+		fchan->set_src_loop_size(fchan, slave->src_loop_size);
 
-	if (fsl_chan->set_dst_loop_size)
-		fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size);
+	if (fchan->set_dst_loop_size)
+		fchan->set_dst_loop_size(fchan, slave->dst_loop_size);
 
-	if (fsl_chan->toggle_ext_start)
-		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+	if (fchan->toggle_ext_start)
+		fchan->toggle_ext_start(fchan, slave->external_start);
 
-	if (fsl_chan->toggle_ext_pause)
-		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+	if (fchan->toggle_ext_pause)
+		fchan->toggle_ext_pause(fchan, slave->external_pause);
 
-	if (fsl_chan->set_request_count)
-		fsl_chan->set_request_count(fsl_chan, slave->request_count);
+	if (fchan->set_request_count)
+		fchan->set_request_count(fchan, slave->request_count);
 
 	return &first->async_tx;
 
@@ -751,7 +751,7 @@ fail:
 	tx_list = &first->tx_list;
 	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
 		list_del_init(&new->node);
-		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+		dma_pool_free(fchan->desc_pool, new, new->async_tx.phys);
 	}
 
 	return NULL;
@@ -759,54 +759,54 @@ fail:
 
 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 {
-	struct fsldma_chan *fsl_chan;
+	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *desc, *tmp;
 	unsigned long flags;
 
 	if (!chan)
 		return;
 
-	fsl_chan = to_fsl_chan(chan);
+	fchan = to_fsl_chan(chan);
 
 	/* Halt the DMA engine */
-	dma_halt(fsl_chan);
+	dma_halt(fchan);
 
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&fchan->desc_lock, flags);
 
 	/* Remove and free all of the descriptors in the LD queue */
-	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
+	list_for_each_entry_safe(desc, tmp, &fchan->ld_queue, node) {
 		list_del(&desc->node);
-		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
 	}
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 }
 
 /**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *fchan)
 {
 	struct fsl_desc_sw *cur_desc, *desc;
 	dma_addr_t ld_phy;
 
-	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+	ld_phy = get_cdar(fchan) & FSL_DMA_NLDA_MASK;
 
 	if (ld_phy) {
 		cur_desc = NULL;
-		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
+		list_for_each_entry(desc, &fchan->ld_queue, node)
 			if (desc->async_tx.phys == ld_phy) {
 				cur_desc = desc;
 				break;
 			}
 
 		if (cur_desc && cur_desc->async_tx.cookie) {
-			if (dma_is_idle(fsl_chan))
-				fsl_chan->completed_cookie =
+			if (dma_is_idle(fchan))
+				fchan->completed_cookie =
 					cur_desc->async_tx.cookie;
 			else
-				fsl_chan->completed_cookie =
+				fchan->completed_cookie =
 					cur_desc->async_tx.cookie - 1;
 		}
 	}
@@ -814,27 +814,27 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
 
 /**
  * fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  *
  * This function clean up the ld_queue of DMA channel.
  * If 'in_intr' is set, the function will move the link descriptor to
  * the recycle list. Otherwise, free it directly.
  */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *fchan)
 {
 	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
-			fsl_chan->completed_cookie);
-	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+	dev_dbg(fchan->dev, "chan completed_cookie = %d\n",
+			fchan->completed_cookie);
+	list_for_each_entry_safe(desc, _desc, &fchan->ld_queue, node) {
 		dma_async_tx_callback callback;
 		void *callback_param;
 
 		if (dma_async_is_complete(desc->async_tx.cookie,
-			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
+			    fchan->completed_cookie, fchan->common.cookie)
 				== DMA_IN_PROGRESS)
 			break;
 
@@ -844,119 +844,119 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
 		/* Remove from ld_queue list */
 		list_del(&desc->node);
 
-		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
+		dev_dbg(fchan->dev, "link descriptor %p will be recycle.\n",
 				desc);
-		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
 
 		/* Run the link descriptor callback function */
 		if (callback) {
-			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
+			spin_unlock_irqrestore(&fchan->desc_lock, flags);
+			dev_dbg(fchan->dev, "link descriptor %p callback\n",
 					desc);
 			callback(callback_param);
-			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+			spin_lock_irqsave(&fchan->desc_lock, flags);
 		}
 	}
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 }
 
 /**
  * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  */
-static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fchan)
 {
 	struct list_head *ld_node;
 	dma_addr_t next_dst_addr;
 	unsigned long flags;
 
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-	if (!dma_is_idle(fsl_chan))
+	if (!dma_is_idle(fchan))
 		goto out_unlock;
 
-	dma_halt(fsl_chan);
+	dma_halt(fchan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
 
 	/* Find the first un-transfer desciptor */
-	for (ld_node = fsl_chan->ld_queue.next;
-		(ld_node != &fsl_chan->ld_queue)
+	for (ld_node = fchan->ld_queue.next;
+		(ld_node != &fchan->ld_queue)
 			&& (dma_async_is_complete(
 				to_fsl_desc(ld_node)->async_tx.cookie,
-				fsl_chan->completed_cookie,
-				fsl_chan->common.cookie) == DMA_SUCCESS);
+				fchan->completed_cookie,
+				fchan->common.cookie) == DMA_SUCCESS);
 		ld_node = ld_node->next);
 
-	if (ld_node != &fsl_chan->ld_queue) {
+	if (ld_node != &fchan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+		dev_dbg(fchan->dev, "xfer LDs staring from 0x%llx\n",
 				(unsigned long long)next_dst_addr);
-		set_cdar(fsl_chan, next_dst_addr);
-		dma_start(fsl_chan);
+		set_cdar(fchan, next_dst_addr);
+		dma_start(fchan);
 	} else {
-		set_cdar(fsl_chan, 0);
-		set_ndar(fsl_chan, 0);
+		set_cdar(fchan, 0);
+		set_ndar(fchan, 0);
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 }
 
 /**
  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  */
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
-	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fchan = to_fsl_chan(chan);
 
 #ifdef FSL_DMA_LD_DEBUG
 	struct fsl_desc_sw *ld;
 	unsigned long flags;
 
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
-	if (list_empty(&fsl_chan->ld_queue)) {
-		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&fchan->desc_lock, flags);
+	if (list_empty(&fchan->ld_queue)) {
+		spin_unlock_irqrestore(&fchan->desc_lock, flags);
 		return;
 	}
 
-	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
-	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
+	dev_dbg(fchan->dev, "--memcpy issue--\n");
+	list_for_each_entry(ld, &fchan->ld_queue, node) {
 		int i;
-		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
-				fsl_chan->id, ld->async_tx.phys);
+		dev_dbg(fchan->dev, "Ch %d, LD %08x\n",
+				fchan->id, ld->async_tx.phys);
 		for (i = 0; i < 8; i++)
-			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
+			dev_dbg(fchan->dev, "LD offset %d: %08x\n",
 					i, *(((u32 *)&ld->hw) + i));
 	}
-	dev_dbg(fsl_chan->dev, "----------------\n");
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	dev_dbg(fchan->dev, "----------------\n");
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 #endif
 
-	fsl_chan_xfer_ld_queue(fsl_chan);
+	fsl_chan_xfer_ld_queue(fchan);
 }
 
 /**
  * fsl_dma_is_complete - Determine the DMA status
- * @fsl_chan : Freescale DMA channel
+ * @fchan : Freescale DMA channel
  */
 static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 					dma_cookie_t cookie,
 					dma_cookie_t *done,
 					dma_cookie_t *used)
 {
-	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+	struct fsldma_chan *fchan = to_fsl_chan(chan);
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 
-	fsl_chan_ld_cleanup(fsl_chan);
+	fsl_chan_ld_cleanup(fchan);
 
 	last_used = chan->cookie;
-	last_complete = fsl_chan->completed_cookie;
+	last_complete = fchan->completed_cookie;
 
 	if (done)
 		*done = last_complete;
@@ -969,30 +969,30 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
 
 static irqreturn_t fsldma_chan_irq(int irq, void *data)
 {
-	struct fsldma_chan *fsl_chan = data;
-	u32 stat;
+	struct fsldma_chan *fchan = data;
 	int update_cookie = 0;
 	int xfer_ld_q = 0;
+	u32 stat;
 
-	stat = get_sr(fsl_chan);
-	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
-						fsl_chan->id, stat);
-	set_sr(fsl_chan, stat);		/* Clear the event register */
+	stat = get_sr(fchan);
+	dev_dbg(fchan->dev, "event: channel %d, stat = 0x%x\n",
+						fchan->id, stat);
+	set_sr(fchan, stat);		/* Clear the event register */
 
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
 		return IRQ_NONE;
 
 	if (stat & FSL_DMA_SR_TE)
-		dev_err(fsl_chan->dev, "Transfer Error!\n");
+		dev_err(fchan->dev, "Transfer Error!\n");
 
 	/* Programming Error
 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
 	 * triger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
-		if (get_bcr(fsl_chan) == 0) {
+		dev_dbg(fchan->dev, "event: Programming Error INT\n");
+		if (get_bcr(fchan) == 0) {
 			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
 			 * Now, update the completed cookie, and continue the
 			 * next uncompleted transfer.
@@ -1007,10 +1007,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * we will recycle the used descriptor.
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
-			(unsigned long long)get_cdar(fsl_chan),
-			(unsigned long long)get_ndar(fsl_chan));
+		dev_dbg(fchan->dev, "event: End-of-segments INT\n");
+		dev_dbg(fchan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+			(unsigned long long)get_cdar(fchan),
+			(unsigned long long)get_ndar(fchan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
@@ -1019,7 +1019,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * and start the next transfer if it exist.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+		dev_dbg(fchan->dev, "event: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
 		update_cookie = 1;
 		xfer_ld_q = 1;
@@ -1030,28 +1030,28 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+		dev_dbg(fchan->dev, "event: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
 		xfer_ld_q = 1;
 	}
 
 	if (update_cookie)
-		fsl_dma_update_completed_cookie(fsl_chan);
+		fsl_dma_update_completed_cookie(fchan);
 	if (xfer_ld_q)
-		fsl_chan_xfer_ld_queue(fsl_chan);
+		fsl_chan_xfer_ld_queue(fchan);
 	if (stat)
-		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
+		dev_dbg(fchan->dev, "event: unhandled sr 0x%02x\n",
 					stat);
 
-	dev_dbg(fsl_chan->dev, "event: Exit\n");
-	tasklet_schedule(&fsl_chan->tasklet);
+	dev_dbg(fchan->dev, "event: Exit\n");
+	tasklet_schedule(&fchan->tasklet);
 	return IRQ_HANDLED;
 }
 
 static void dma_do_tasklet(unsigned long data)
 {
-	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
-	fsl_chan_ld_cleanup(fsl_chan);
+	struct fsldma_chan *fchan = (struct fsldma_chan *)data;
+	fsl_chan_ld_cleanup(fchan);
 }
 
 /*----------------------------------------------------------------------------*/
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 8/8] fsldma: major cleanups and fixes
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (6 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 7/8] fsldma: rename fsl_chan to fchan Ira W. Snyder
@ 2010-01-01  6:10 ` Ira W. Snyder
  2010-01-05  6:08 ` fsldma: cleanup driver and fix async_tx compatibility Dudhat Dipen-B09055
  2010-01-11  5:47 ` Dudhat Dipen-B09055
  9 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-01  6:10 UTC (permalink / raw)
  To: dan.j.williams
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	Maneesh.Gupta, R58472

Fix locking. Use two queues in the driver, one for pending transacions, and
one for transactions which are actually running on the hardware. Call
dma_run_dependencies() on descriptor cleanup so that the async_tx API works
correctly.

There are a number of places throughout the code where lists of descriptors
are freed in a loop. Create functions to handle this, and use them instead
of open-coding the loop each time.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |  386 ++++++++++++++++++++++++++-----------------------
 drivers/dma/fsldma.h |    3 +-
 2 files changed, 207 insertions(+), 182 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f65b28b..d49bdff 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -61,7 +61,6 @@ static void dma_init(struct fsldma_chan *fchan)
 				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
-
 }
 
 static void set_sr(struct fsldma_chan *fchan, u32 val)
@@ -120,11 +119,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *fchan)
 	return DMA_IN(fchan, &fchan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsldma_chan *fchan, dma_addr_t addr)
-{
-	DMA_OUT(fchan, &fchan->regs->ndar, addr, 64);
-}
-
 static dma_addr_t get_ndar(struct fsldma_chan *fchan)
 {
 	return DMA_IN(fchan, &fchan->regs->ndar, 64);
@@ -178,11 +172,12 @@ static void dma_halt(struct fsldma_chan *fchan)
 
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(fchan))
-			break;
+			return;
+
 		udelay(10);
 	}
 
-	if (i >= 100 && !dma_is_idle(fchan))
+	if (!dma_is_idle(fchan))
 		dev_err(fchan->dev, "DMA halt timeout!\n");
 }
 
@@ -199,27 +194,6 @@ static void set_ld_eol(struct fsldma_chan *fchan,
 			| snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsldma_chan *fchan,
-		struct fsl_desc_sw *new_desc)
-{
-	struct fsl_desc_sw *queue_tail = to_fsl_desc(fchan->ld_queue.prev);
-
-	if (list_empty(&fchan->ld_queue))
-		return;
-
-	/* Link to the new descriptor physical address and
-	 * Enable End-of-segment interrupt for
-	 * the last link descriptor.
-	 * (the previous node's next link descriptor)
-	 *
-	 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
-	 */
-	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fchan,
-			new_desc->async_tx.phys | FSL_DMA_EOSIE |
-			(((fchan->feature & FSL_DMA_IP_MASK)
-				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
 /**
  * fsl_chan_set_src_loop_size - Set source address hold transfer size
  * @fchan : Freescale DMA channel
@@ -343,6 +317,31 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *fchan, int enable)
 		fchan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
+static void append_ld_queue(struct fsldma_chan *fchan,
+			    struct fsl_desc_sw *desc)
+{
+	struct fsl_desc_sw *tail = to_fsl_desc(fchan->ld_pending.prev);
+
+	if (list_empty(&fchan->ld_pending))
+		goto out_splice;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 *
+	 * This will un-set the EOL bit of the existing transaction, and the
+	 * last link in this transaction will become the EOL descriptor.
+	 */
+	set_desc_next(fchan, &tail->hw, desc->async_tx.phys);
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+out_splice:
+	list_splice_tail_init(&desc->tx_list, &fchan->ld_pending);
+}
+
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct fsldma_chan *fchan = to_fsl_chan(tx->chan);
@@ -351,9 +350,12 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	dma_cookie_t cookie;
 
-	/* cookie increment and adding to ld_queue must be atomic */
 	spin_lock_irqsave(&fchan->desc_lock, flags);
 
+	/*
+	 * assign cookies to all of the software descriptors
+	 * that make up this transaction
+	 */
 	cookie = fchan->common.cookie;
 	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
@@ -364,8 +366,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	fchan->common.cookie = cookie;
+
+	/* put this transaction onto the tail of the pending queue */
 	append_ld_queue(fchan, desc);
-	list_splice_init(&desc->tx_list, fchan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 
@@ -381,20 +384,22 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 					struct fsldma_chan *fchan)
 {
+	struct fsl_desc_sw *desc;
 	dma_addr_t pdesc;
-	struct fsl_desc_sw *desc_sw;
-
-	desc_sw = dma_pool_alloc(fchan->desc_pool, GFP_ATOMIC, &pdesc);
-	if (desc_sw) {
-		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
-		INIT_LIST_HEAD(&desc_sw->tx_list);
-		dma_async_tx_descriptor_init(&desc_sw->async_tx,
-						&fchan->common);
-		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
-		desc_sw->async_tx.phys = pdesc;
+
+	desc = dma_pool_alloc(fchan->desc_pool, GFP_ATOMIC, &pdesc);
+	if (!desc) {
+		dev_dbg(fchan->dev, "out of memory for link desc\n");
+		return NULL;
 	}
 
-	return desc_sw;
+	memset(desc, 0, sizeof(*desc));
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->async_tx, &fchan->common);
+	desc->async_tx.tx_submit = fsl_dma_tx_submit;
+	desc->async_tx.phys = pdesc;
+
+	return desc;
 }
 
 
@@ -414,45 +419,69 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (fchan->desc_pool)
 		return 1;
 
-	/* We need the descriptor to be aligned to 32bytes
+	/*
+	 * We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
 	 */
 	fchan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-			fchan->dev, sizeof(struct fsl_desc_sw),
-			32, 0);
+					   fchan->dev,
+					   sizeof(struct fsl_desc_sw),
+					   __alignof__(struct fsl_desc_sw), 0);
 	if (!fchan->desc_pool) {
-		dev_err(fchan->dev, "No memory for channel %d "
-			"descriptor dma pool.\n", fchan->id);
-		return 0;
+		dev_err(fchan->dev, "unable to allocate channel %d "
+				    "descriptor pool\n", fchan->id);
+		return -ENOMEM;
 	}
 
+	/* there is at least one descriptor free to be allocated */
 	return 1;
 }
 
 /**
+ * fsldma_free_desc_list - Free all descriptors in a queue
+ * @fchan: Freescale DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold fchan->desc_lock
+ */
+static void fsldma_free_desc_list(struct fsldma_chan *fchan,
+				  struct list_head *list)
+{
+	struct fsl_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+static void fsldma_free_desc_list_reverse(struct fsldma_chan *fchan,
+					  struct list_head *list)
+{
+	struct fsl_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+/**
  * fsl_dma_free_chan_resources - Free all resources of the channel.
  * @fchan : Freescale DMA channel
  */
 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct fsldma_chan *fchan = to_fsl_chan(chan);
-	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
 	dev_dbg(fchan->dev, "Free all channel resources.\n");
 	spin_lock_irqsave(&fchan->desc_lock, flags);
-	list_for_each_entry_safe(desc, _desc, &fchan->ld_queue, node) {
-#ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(fchan->dev,
-				"LD %p will be released.\n", desc);
-#endif
-		list_del(&desc->node);
-		/* free link descriptor */
-		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
-	}
+	fsldma_free_desc_list(fchan, &fchan->ld_pending);
+	fsldma_free_desc_list(fchan, &fchan->ld_running);
 	spin_unlock_irqrestore(&fchan->desc_lock, flags);
-	dma_pool_destroy(fchan->desc_pool);
 
+	dma_pool_destroy(fchan->desc_pool);
 	fchan->desc_pool = NULL;
 }
 
@@ -491,7 +520,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
-	struct list_head *list;
 	size_t copy;
 
 	if (!chan)
@@ -550,12 +578,7 @@ fail:
 	if (!first)
 		return NULL;
 
-	list = &first->tx_list;
-	list_for_each_entry_safe_reverse(new, prev, list, node) {
-		list_del(&new->node);
-		dma_pool_free(fchan->desc_pool, new, new->async_tx.phys);
-	}
-
+	fsldma_free_desc_list_reverse(fchan, &first->tx_list);
 	return NULL;
 }
 
@@ -578,7 +601,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	struct fsldma_chan *fchan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
 	struct fsl_dma_slave *slave;
-	struct list_head *tx_list;
 	size_t copy;
 
 	int i;
@@ -748,19 +770,13 @@ fail:
 	 *
 	 * We're re-using variables for the loop, oh well
 	 */
-	tx_list = &first->tx_list;
-	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
-		list_del_init(&new->node);
-		dma_pool_free(fchan->desc_pool, new, new->async_tx.phys);
-	}
-
+	fsldma_free_desc_list_reverse(fchan, &first->tx_list);
 	return NULL;
 }
 
 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 {
 	struct fsldma_chan *fchan;
-	struct fsl_desc_sw *desc, *tmp;
 	unsigned long flags;
 
 	if (!chan)
@@ -774,10 +790,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 	spin_lock_irqsave(&fchan->desc_lock, flags);
 
 	/* Remove and free all of the descriptors in the LD queue */
-	list_for_each_entry_safe(desc, tmp, &fchan->ld_queue, node) {
-		list_del(&desc->node);
-		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
-	}
+	fsldma_free_desc_list(fchan, &fchan->ld_pending);
+	fsldma_free_desc_list(fchan, &fchan->ld_running);
 
 	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 }
@@ -785,31 +799,48 @@ static void fsl_dma_device_terminate_all(struct dma_chan *chan)
 /**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
  * @fchan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
  */
 static void fsl_dma_update_completed_cookie(struct fsldma_chan *fchan)
 {
-	struct fsl_desc_sw *cur_desc, *desc;
-	dma_addr_t ld_phy;
-
-	ld_phy = get_cdar(fchan) & FSL_DMA_NLDA_MASK;
+	struct fsl_desc_sw *desc;
+	unsigned long flags;
+	dma_cookie_t cookie;
 
-	if (ld_phy) {
-		cur_desc = NULL;
-		list_for_each_entry(desc, &fchan->ld_queue, node)
-			if (desc->async_tx.phys == ld_phy) {
-				cur_desc = desc;
-				break;
-			}
+	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-		if (cur_desc && cur_desc->async_tx.cookie) {
-			if (dma_is_idle(fchan))
-				fchan->completed_cookie =
-					cur_desc->async_tx.cookie;
-			else
-				fchan->completed_cookie =
-					cur_desc->async_tx.cookie - 1;
-		}
+	if (list_empty(&fchan->ld_running)) {
+		dev_dbg(fchan->dev, "no running descriptors\n");
+		goto out_unlock;
 	}
+
+	/* Get the last descriptor, update the cookie to that */
+	desc = to_fsl_desc(fchan->ld_running.prev);
+	if (dma_is_idle(fchan))
+		cookie = desc->async_tx.cookie;
+	else
+		cookie = desc->async_tx.cookie - 1;
+
+	fchan->completed_cookie = cookie;
+
+out_unlock:
+	spin_unlock_irqrestore(&fchan->desc_lock, flags);
+}
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @fchan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *fchan,
+					  struct fsl_desc_sw *desc)
+{
+	return dma_async_is_complete(desc->async_tx.cookie,
+				     fchan->completed_cookie,
+				     fchan->common.cookie);
 }
 
 /**
@@ -817,8 +848,6 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *fchan)
  * @fchan : Freescale DMA channel
  *
  * This function clean up the ld_queue of DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
  */
 static void fsl_chan_ld_cleanup(struct fsldma_chan *fchan)
 {
@@ -827,80 +856,95 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fchan)
 
 	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-	dev_dbg(fchan->dev, "chan completed_cookie = %d\n",
-			fchan->completed_cookie);
-	list_for_each_entry_safe(desc, _desc, &fchan->ld_queue, node) {
+	dev_dbg(fchan->dev, "chan completed_cookie = %d\n", fchan->completed_cookie);
+	list_for_each_entry_safe(desc, _desc, &fchan->ld_running, node) {
 		dma_async_tx_callback callback;
 		void *callback_param;
 
-		if (dma_async_is_complete(desc->async_tx.cookie,
-			    fchan->completed_cookie, fchan->common.cookie)
-				== DMA_IN_PROGRESS)
+		if (fsldma_desc_status(fchan, desc) == DMA_IN_PROGRESS)
 			break;
 
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-
-		/* Remove from ld_queue list */
+		/* Remove from the list of running transactions */
 		list_del(&desc->node);
 
-		dev_dbg(fchan->dev, "link descriptor %p will be recycle.\n",
-				desc);
-		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
-
 		/* Run the link descriptor callback function */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
 		if (callback) {
 			spin_unlock_irqrestore(&fchan->desc_lock, flags);
-			dev_dbg(fchan->dev, "link descriptor %p callback\n",
-					desc);
+			dev_dbg(fchan->dev, "LD %p callback\n", desc);
 			callback(callback_param);
 			spin_lock_irqsave(&fchan->desc_lock, flags);
 		}
+
+		/* Run any dependencies, then free the descriptor */
+		dma_run_dependencies(&desc->async_tx);
+		dma_pool_free(fchan->desc_pool, desc, desc->async_tx.phys);
 	}
+
 	spin_unlock_irqrestore(&fchan->desc_lock, flags);
 }
 
 /**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @fchan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fchan)
 {
-	struct list_head *ld_node;
-	dma_addr_t next_dst_addr;
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&fchan->desc_lock, flags);
 
-	if (!dma_is_idle(fchan))
+	/*
+	 * If the list of pending descriptors is empty, then we
+	 * don't need to do any work at all
+	 */
+	if (list_empty(&fchan->ld_pending)) {
+		dev_dbg(fchan->dev, "no pending LDs\n");
 		goto out_unlock;
+	}
 
+	/*
+	 * The DMA controller is not idle, which means the interrupt
+	 * handler will start any queued transactions when it runs
+	 * at the end of the current transaction
+	 */
+	if (!dma_is_idle(fchan)) {
+		dev_dbg(fchan->dev, "DMA controller still busy\n");
+		goto out_unlock;
+	}
+
+	/*
+	 * TODO:
+	 * make sure the dma_halt() function really un-wedges the
+	 * controller as much as possible
+	 */
 	dma_halt(fchan);
 
-	/* If there are some link descriptors
-	 * not transfered in queue. We need to start it.
+	/*
+	 * If there are some link descriptors which have not been
+	 * transferred, we need to start the controller
 	 */
 
-	/* Find the first un-transfer desciptor */
-	for (ld_node = fchan->ld_queue.next;
-		(ld_node != &fchan->ld_queue)
-			&& (dma_async_is_complete(
-				to_fsl_desc(ld_node)->async_tx.cookie,
-				fchan->completed_cookie,
-				fchan->common.cookie) == DMA_SUCCESS);
-		ld_node = ld_node->next);
-
-	if (ld_node != &fchan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fchan->dev, "xfer LDs staring from 0x%llx\n",
-				(unsigned long long)next_dst_addr);
-		set_cdar(fchan, next_dst_addr);
-		dma_start(fchan);
-	} else {
-		set_cdar(fchan, 0);
-		set_ndar(fchan, 0);
-	}
+	/*
+	 * Move all elements from the queue of pending transactions
+	 * onto the list of running transactions
+	 */
+	desc = list_first_entry(&fchan->ld_pending, struct fsl_desc_sw, node);
+	list_splice_tail_init(&fchan->ld_pending, &fchan->ld_running);
+
+	/*
+	 * Program the descriptor's address into the DMA controller,
+	 * then start the DMA transaction
+	 */
+	set_cdar(fchan, desc->async_tx.phys);
+	dma_start(fchan);
 
 out_unlock:
 	spin_unlock_irqrestore(&fchan->desc_lock, flags);
@@ -913,30 +957,6 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct fsldma_chan *fchan = to_fsl_chan(chan);
-
-#ifdef FSL_DMA_LD_DEBUG
-	struct fsl_desc_sw *ld;
-	unsigned long flags;
-
-	spin_lock_irqsave(&fchan->desc_lock, flags);
-	if (list_empty(&fchan->ld_queue)) {
-		spin_unlock_irqrestore(&fchan->desc_lock, flags);
-		return;
-	}
-
-	dev_dbg(fchan->dev, "--memcpy issue--\n");
-	list_for_each_entry(ld, &fchan->ld_queue, node) {
-		int i;
-		dev_dbg(fchan->dev, "Ch %d, LD %08x\n",
-				fchan->id, ld->async_tx.phys);
-		for (i = 0; i < 8; i++)
-			dev_dbg(fchan->dev, "LD offset %d: %08x\n",
-					i, *(((u32 *)&ld->hw) + i));
-	}
-	dev_dbg(fchan->dev, "----------------\n");
-	spin_unlock_irqrestore(&fchan->desc_lock, flags);
-#endif
-
 	fsl_chan_xfer_ld_queue(fchan);
 }
 
@@ -974,10 +994,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	int xfer_ld_q = 0;
 	u32 stat;
 
+	/* save and clear the status register */
 	stat = get_sr(fchan);
-	dev_dbg(fchan->dev, "event: channel %d, stat = 0x%x\n",
-						fchan->id, stat);
-	set_sr(fchan, stat);		/* Clear the event register */
+	set_sr(fchan, stat);
+	dev_dbg(fchan->dev, "irq: channel %d, stat = 0x%x\n", fchan->id, stat);
 
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
@@ -986,12 +1006,13 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (stat & FSL_DMA_SR_TE)
 		dev_err(fchan->dev, "Transfer Error!\n");
 
-	/* Programming Error
+	/*
+	 * Programming Error
 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
 	 * triger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(fchan->dev, "event: Programming Error INT\n");
+		dev_dbg(fchan->dev, "irq: Programming Error INT\n");
 		if (get_bcr(fchan) == 0) {
 			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
 			 * Now, update the completed cookie, and continue the
@@ -1003,34 +1024,37 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 		stat &= ~FSL_DMA_SR_PE;
 	}
 
-	/* If the link descriptor segment transfer finishes,
+	/*
+	 * If the link descriptor segment transfer finishes,
 	 * we will recycle the used descriptor.
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(fchan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fchan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+		dev_dbg(fchan->dev, "irq: End-of-segments INT\n");
+		dev_dbg(fchan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
 			(unsigned long long)get_cdar(fchan),
 			(unsigned long long)get_ndar(fchan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
 
-	/* For MPC8349, EOCDI event need to update cookie
+	/*
+	 * For MPC8349, EOCDI event need to update cookie
 	 * and start the next transfer if it exist.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(fchan->dev, "event: End-of-Chain link INT\n");
+		dev_dbg(fchan->dev, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
 		update_cookie = 1;
 		xfer_ld_q = 1;
 	}
 
-	/* If it current transfer is the end-of-transfer,
+	/*
+	 * If it current transfer is the end-of-transfer,
 	 * we should clear the Channel Start bit for
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(fchan->dev, "event: End-of-link INT\n");
+		dev_dbg(fchan->dev, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
 		xfer_ld_q = 1;
 	}
@@ -1040,10 +1064,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (xfer_ld_q)
 		fsl_chan_xfer_ld_queue(fchan);
 	if (stat)
-		dev_dbg(fchan->dev, "event: unhandled sr 0x%02x\n",
-					stat);
+		dev_dbg(fchan->dev, "irq: unhandled sr 0x%02x\n", stat);
 
-	dev_dbg(fchan->dev, "event: Exit\n");
+	dev_dbg(fchan->dev, "irq: Exit\n");
 	tasklet_schedule(&fchan->tasklet);
 	return IRQ_HANDLED;
 }
@@ -1125,7 +1148,8 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	}
 
 	spin_lock_init(&fchan->desc_lock);
-	INIT_LIST_HEAD(&fchan->ld_queue);
+	INIT_LIST_HEAD(&fchan->ld_pending);
+	INIT_LIST_HEAD(&fchan->ld_running);
 
 	fchan->common.device = &fdev->common;
 
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ea3b19c..cb4d6ff 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -131,7 +131,8 @@ struct fsldma_chan {
 	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_pending;	/* Link descriptors queue */
+	struct list_head ld_running;	/* Link descriptors queue */
 	struct dma_chan common;		/* DMA common channel */
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 	struct device *dev;		/* Channel device */
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* RE: fsldma: cleanup driver and fix async_tx compatibility
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (7 preceding siblings ...)
  2010-01-01  6:10 ` [PATCH 8/8] fsldma: major cleanups and fixes Ira W. Snyder
@ 2010-01-05  6:08 ` Dudhat Dipen-B09055
  2010-01-11  5:47 ` Dudhat Dipen-B09055
  9 siblings, 0 replies; 22+ messages in thread
From: Dudhat Dipen-B09055 @ 2010-01-05  6:08 UTC (permalink / raw)
  To: Ira W. Snyder, dan.j.williams
  Cc: herbert, Suresh Vishnu-B05022, Tabi Timur-B04825, linuxppc-dev,
	Gupta Maneesh-B18878, Li Yang-R58472


Hi Ira,

I will test it on 85xx hardware and let you know once done.

Thanks
Dipen

-----Original Message-----
From: Ira W. Snyder [mailto:iws@ovro.caltech.edu]
Sent: Friday, January 01, 2010 11:41 AM
To: dan.j.williams@intel.com
Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Dudhat
Dipen-B09055; Gupta Maneesh-B18878; Li Yang-R58472
Subject: fsldma: cleanup driver and fix async_tx compatibility

This patch series cleans up the Freescale DMAEngine driver, including
verifying the locking and making sure that all code paths are correct.
There were a few places that seemed suspicious, and they have been
fixed.

I have written a quick memory->memory DMAEngine test driver, and the
performance is identical before and after my changes (<0.1% change). I
measured both setting up the DMA operation (via
device_prep_dma_interrupt() and device_prep_dma_memcpy()) and the actual
DMA transfer itself.

As an added bonus, the interrupt load is measurably reduced. My test
driver transfers 32MB as 32x 1MB chunks + 1 interrupt descriptor, using
the functions noted above. Previous to this patch series, 31 interrupts
were generated. After this patch series, only a single interrupt is
generated for the whole transaction.

Some testing on 85xx/86xx hardware would be appreciated. Also, some
testing by the users attempting to use async_tx and talitos to handle
RAID offload would be great as well.

 Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +-
 drivers/dma/fsldma.c                           | 1036
++++++++++++------------
 drivers/dma/fsldma.h                           |   35 +-
 3 files changed, 556 insertions(+), 532 deletions(-)

Thanks,
Ira

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 6/8] fsldma: simplify IRQ probing and handling
  2010-01-01  6:10 ` [PATCH 6/8] fsldma: simplify IRQ probing and handling Ira W. Snyder
@ 2010-01-06 18:02   ` Scott Wood
  2010-01-06 18:39     ` Ira W. Snyder
  0 siblings, 1 reply; 22+ messages in thread
From: Scott Wood @ 2010-01-06 18:02 UTC (permalink / raw)
  To: Ira W. Snyder
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

On Thu, Dec 31, 2009 at 10:10:44PM -0800, Ira W. Snyder wrote:
> The IRQ probing is needlessly complex. All of the 83xx device trees in
> arch/powerpc/boot/dts/ specify 5 interrupts per DMA controller: one for the
> controller, and one for each channel. These interrupts are all attached to
> the same IRQ line.

The reason for this was to accommodate different types of drivers.  A driver
may want to only care about one channel (e.g. if that channel is used for
something specific such as audio), in which case it can just look at the
channel node.

A driver that handles multiple channels, OTOH, may want to only register one
interrupt handler that processes all channels, possibly using the summary
register, on chips that do not have a different interrupt per channel.  Such
drivers would register the controller interrupt if there is one -- and if
so, it would ignore the channel interrupts.

> This causes an interesting situation if two channels interrupt at the same
> time. The controller's handler will handle the first channel, and the
> channel handler will handle the remaining channels.

The driver should not be registering handlers for both the controller
interrupt and the channel interrupt.

It looks like the other problem is that the controller interrupt handler
is assuming only one bit will be set in the summary register.  It should
call the channel handler for each bit that is set.

> The same can be accomplished on 83xx by removing the controller's interrupt
> property from the device tree. Testing has not shown any problems with this
> configuration. 

Yes, it will work, but the overhead is a little higher than if you only had
the one handler and used the summary register.

> All in-tree device trees already have an interrupt property
> specified for each channel.
> 
> Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
> ---
>  Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +++++---
>  drivers/dma/fsldma.c                           |   49 +++++++-----------------
>  2 files changed, 25 insertions(+), 41 deletions(-)
> 
> diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
> index 0732cdd..d5d2d3d 100644
> --- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
> +++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
> @@ -12,6 +12,9 @@ Required properties:
>  - ranges		: Should be defined as specified in 1) to describe the
>  		  DMA controller channels.
>  - cell-index        : controller index.  0 for controller @ 0x8100
> +
> +Optional properties:
> +
>  - interrupts        : <interrupt mapping for DMA IRQ>
>  - interrupt-parent  : optional, if needed for interrupt mapping
>  
> @@ -23,11 +26,7 @@ Required properties:
>  			 "fsl,elo-dma-channel". However, see note below.
>          - reg               : <registers mapping for channel>
>          - cell-index        : dma channel index starts at 0.
> -
> -Optional properties:
>          - interrupts        : <interrupt mapping for DMA channel IRQ>
> -			  (on 83xx this is expected to be identical to
> -			   the interrupts property of the parent node)

It should indeed be the controller interrupt that is optional, but why
remove the comment about 83xx?  That's the only time you'll have a
controller interrupt at all.

>          - interrupt-parent  : optional, if needed for interrupt mapping
>  
>  Example:
> @@ -37,28 +36,34 @@ Example:
>  		compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
>  		reg = <0x82a8 4>;
>  		ranges = <0 0x8100 0x1a4>;
> -		interrupt-parent = <&ipic>;
> -		interrupts = <71 8>;
>  		cell-index = <0>;
>  		dma-channel@0 {
>  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
>  			cell-index = <0>;
>  			reg = <0 0x80>;
> +			interrupt-parent = <&ipic>;
> +			interrupts = <71 8>;
>  		};
>  		dma-channel@80 {
>  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
>  			cell-index = <1>;
>  			reg = <0x80 0x80>;
> +			interrupt-parent = <&ipic>;
> +			interrupts = <71 8>;
>  		};
>  		dma-channel@100 {
>  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
>  			cell-index = <2>;
>  			reg = <0x100 0x80>;
> +			interrupt-parent = <&ipic>;
> +			interrupts = <71 8>;
>  		};
>  		dma-channel@180 {
>  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
>  			cell-index = <3>;
>  			reg = <0x180 0x80>;
> +			interrupt-parent = <&ipic>;
> +			interrupts = <71 8>;

I'd rather the example provide both controller and channel interrupts.

-Scott

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] fsldma: rename fsl_chan to fchan
  2010-01-01  6:10 ` [PATCH 7/8] fsldma: rename fsl_chan to fchan Ira W. Snyder
@ 2010-01-06 18:04   ` Scott Wood
  2010-01-06 18:19     ` Ira W. Snyder
  0 siblings, 1 reply; 22+ messages in thread
From: Scott Wood @ 2010-01-06 18:04 UTC (permalink / raw)
  To: Ira W. Snyder
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

On Thu, Dec 31, 2009 at 10:10:45PM -0800, Ira W. Snyder wrote:
> The name fsl_chan seems too long, so it has been shortened to fchan.

Could be just "chan", no need for namespacing here.

-Scott

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] fsldma: rename fsl_chan to fchan
  2010-01-06 18:04   ` Scott Wood
@ 2010-01-06 18:19     ` Ira W. Snyder
  2010-01-06 18:27       ` Scott Wood
  0 siblings, 1 reply; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-06 18:19 UTC (permalink / raw)
  To: Scott Wood
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

On Wed, Jan 06, 2010 at 12:04:22PM -0600, Scott Wood wrote:
> On Thu, Dec 31, 2009 at 10:10:45PM -0800, Ira W. Snyder wrote:
> > The name fsl_chan seems too long, so it has been shortened to fchan.
> 
> Could be just "chan", no need for namespacing here.
> 

True. A few functions have a parameter "struct dma_chan *chan" from the
DMAEngine API. I tried to keep the name different to avoid confusing
readers of the code.

Would you recommend changing everything to "struct fsldma_chan *chan",
and changing to "struct dma_chan *_chan" for the DMAEngine stuff? It
seems fine to me, and the compiler will warn about mismatched types if
you screw something up. The struct dma_chan isn't used for anything
except container_of() anyway.

Ira

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] fsldma: rename fsl_chan to fchan
  2010-01-06 18:19     ` Ira W. Snyder
@ 2010-01-06 18:27       ` Scott Wood
  2010-01-06 18:40         ` Ira W. Snyder
  0 siblings, 1 reply; 22+ messages in thread
From: Scott Wood @ 2010-01-06 18:27 UTC (permalink / raw)
  To: Ira W. Snyder
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

Ira W. Snyder wrote:
> On Wed, Jan 06, 2010 at 12:04:22PM -0600, Scott Wood wrote:
>> On Thu, Dec 31, 2009 at 10:10:45PM -0800, Ira W. Snyder wrote:
>>> The name fsl_chan seems too long, so it has been shortened to fchan.
>> Could be just "chan", no need for namespacing here.
>>
> 
> True. A few functions have a parameter "struct dma_chan *chan" from the
> DMAEngine API. I tried to keep the name different to avoid confusing
> readers of the code.

Ah, I see.

I suppose "fchan" is OK in that case, or "priv", or call the dma_chan 
"dchan", etc.

-Scott

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 6/8] fsldma: simplify IRQ probing and handling
  2010-01-06 18:02   ` Scott Wood
@ 2010-01-06 18:39     ` Ira W. Snyder
  2010-01-06 20:51       ` Scott Wood
  0 siblings, 1 reply; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-06 18:39 UTC (permalink / raw)
  To: Scott Wood
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

On Wed, Jan 06, 2010 at 12:02:26PM -0600, Scott Wood wrote:
> On Thu, Dec 31, 2009 at 10:10:44PM -0800, Ira W. Snyder wrote:
> > The IRQ probing is needlessly complex. All of the 83xx device trees in
> > arch/powerpc/boot/dts/ specify 5 interrupts per DMA controller: one for the
> > controller, and one for each channel. These interrupts are all attached to
> > the same IRQ line.
> 
> The reason for this was to accommodate different types of drivers.  A driver
> may want to only care about one channel (e.g. if that channel is used for
> something specific such as audio), in which case it can just look at the
> channel node.
> 
> A driver that handles multiple channels, OTOH, may want to only register one
> interrupt handler that processes all channels, possibly using the summary
> register, on chips that do not have a different interrupt per channel.  Such
> drivers would register the controller interrupt if there is one -- and if
> so, it would ignore the channel interrupts.
> 

Ok. The original driver didn't do this, FYI. It would register all 5
interrupts: 1 controller + 4 channels. It is easy enough to have it
completely ignore per-channel interrupts if there is a controller
interrupt.

I don't think this would break any existing hardware. The 83xx all
shares one IRQ line, and the 85xx/86xx only have per-channel interrupts,
right? (I'm not an 85xx/86xx guy, I've only got 83xx experience). This
is what the device trees suggest, anyway.

> > This causes an interesting situation if two channels interrupt at the same
> > time. The controller's handler will handle the first channel, and the
> > channel handler will handle the remaining channels.
> 
> The driver should not be registering handlers for both the controller
> interrupt and the channel interrupt.
> 
> It looks like the other problem is that the controller interrupt handler
> is assuming only one bit will be set in the summary register.  It should
> call the channel handler for each bit that is set.
> 

Ok. I thought about doing this, but my changed approach seemed easier.

On the 83xx, we should only need to call the per-channel handler once
for each group of 8 bits. The original code used ffs(), which seems a
little wrong. Why call the handler twice if both EOSI and EOCDI are set
for a channel? Should I use a loop + mask, or is there some other neat
trick I can use?

> > The same can be accomplished on 83xx by removing the controller's interrupt
> > property from the device tree. Testing has not shown any problems with this
> > configuration. 
> 
> Yes, it will work, but the overhead is a little higher than if you only had
> the one handler and used the summary register.
> 

Ok. All of the in-tree 83xx device trees have 5 interrupts listed. With
the changes described above, we'll only call request_irq() once in that
case, and use the per-controller interrupt.

I'll leave the documentation alone, with the exception of marking the
per-controller interrupt optional.

Ira

> > All in-tree device trees already have an interrupt property
> > specified for each channel.
> > 
> > Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
> > ---
> >  Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +++++---
> >  drivers/dma/fsldma.c                           |   49 +++++++-----------------
> >  2 files changed, 25 insertions(+), 41 deletions(-)
> > 
> > diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
> > index 0732cdd..d5d2d3d 100644
> > --- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
> > +++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
> > @@ -12,6 +12,9 @@ Required properties:
> >  - ranges		: Should be defined as specified in 1) to describe the
> >  		  DMA controller channels.
> >  - cell-index        : controller index.  0 for controller @ 0x8100
> > +
> > +Optional properties:
> > +
> >  - interrupts        : <interrupt mapping for DMA IRQ>
> >  - interrupt-parent  : optional, if needed for interrupt mapping
> >  
> > @@ -23,11 +26,7 @@ Required properties:
> >  			 "fsl,elo-dma-channel". However, see note below.
> >          - reg               : <registers mapping for channel>
> >          - cell-index        : dma channel index starts at 0.
> > -
> > -Optional properties:
> >          - interrupts        : <interrupt mapping for DMA channel IRQ>
> > -			  (on 83xx this is expected to be identical to
> > -			   the interrupts property of the parent node)
> 
> It should indeed be the controller interrupt that is optional, but why
> remove the comment about 83xx?  That's the only time you'll have a
> controller interrupt at all.
> 
> >          - interrupt-parent  : optional, if needed for interrupt mapping
> >  
> >  Example:
> > @@ -37,28 +36,34 @@ Example:
> >  		compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
> >  		reg = <0x82a8 4>;
> >  		ranges = <0 0x8100 0x1a4>;
> > -		interrupt-parent = <&ipic>;
> > -		interrupts = <71 8>;
> >  		cell-index = <0>;
> >  		dma-channel@0 {
> >  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
> >  			cell-index = <0>;
> >  			reg = <0 0x80>;
> > +			interrupt-parent = <&ipic>;
> > +			interrupts = <71 8>;
> >  		};
> >  		dma-channel@80 {
> >  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
> >  			cell-index = <1>;
> >  			reg = <0x80 0x80>;
> > +			interrupt-parent = <&ipic>;
> > +			interrupts = <71 8>;
> >  		};
> >  		dma-channel@100 {
> >  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
> >  			cell-index = <2>;
> >  			reg = <0x100 0x80>;
> > +			interrupt-parent = <&ipic>;
> > +			interrupts = <71 8>;
> >  		};
> >  		dma-channel@180 {
> >  			compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
> >  			cell-index = <3>;
> >  			reg = <0x180 0x80>;
> > +			interrupt-parent = <&ipic>;
> > +			interrupts = <71 8>;
> 
> I'd rather the example provide both controller and channel interrupts.
> 
> -Scott
> _______________________________________________
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 7/8] fsldma: rename fsl_chan to fchan
  2010-01-06 18:27       ` Scott Wood
@ 2010-01-06 18:40         ` Ira W. Snyder
  0 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-06 18:40 UTC (permalink / raw)
  To: Scott Wood
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

On Wed, Jan 06, 2010 at 12:27:11PM -0600, Scott Wood wrote:
> Ira W. Snyder wrote:
> > On Wed, Jan 06, 2010 at 12:04:22PM -0600, Scott Wood wrote:
> >> On Thu, Dec 31, 2009 at 10:10:45PM -0800, Ira W. Snyder wrote:
> >>> The name fsl_chan seems too long, so it has been shortened to fchan.
> >> Could be just "chan", no need for namespacing here.
> >>
> > 
> > True. A few functions have a parameter "struct dma_chan *chan" from the
> > DMAEngine API. I tried to keep the name different to avoid confusing
> > readers of the code.
> 
> Ah, I see.
> 
> I suppose "fchan" is OK in that case, or "priv", or call the dma_chan 
> "dchan", etc.
> 

Using "priv" is fine with me too, I use that style in a lot of the code
I write. I'll try "chan", and see how it works out.

Ira

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 6/8] fsldma: simplify IRQ probing and handling
  2010-01-06 18:39     ` Ira W. Snyder
@ 2010-01-06 20:51       ` Scott Wood
  0 siblings, 0 replies; 22+ messages in thread
From: Scott Wood @ 2010-01-06 20:51 UTC (permalink / raw)
  To: Ira W. Snyder
  Cc: herbert, B04825, linuxppc-dev, Vishnu, Dipen.Dudhat,
	dan.j.williams, Maneesh.Gupta, R58472

Ira W. Snyder wrote:
> I don't think this would break any existing hardware. The 83xx all
> shares one IRQ line, and the 85xx/86xx only have per-channel interrupts,
> right? (I'm not an 85xx/86xx guy, I've only got 83xx experience). This
> is what the device trees suggest, anyway.

Right.

>> It looks like the other problem is that the controller interrupt handler
>> is assuming only one bit will be set in the summary register.  It should
>> call the channel handler for each bit that is set.
>>
> 
> Ok. I thought about doing this, but my changed approach seemed easier.
> 
> On the 83xx, we should only need to call the per-channel handler once
> for each group of 8 bits. The original code used ffs(), which seems a
> little wrong. Why call the handler twice if both EOSI and EOCDI are set
> for a channel?

Sorry, I meant call it once per channel that has bits set.

> Should I use a loop + mask, or is there some other neat
> trick I can use?

After you process one channel, it shouldn't have any bits set (and if it 
does, it's a new event that needs to be processed), so you could use the 
current ffs approach with a while (summary reg != 0) loop around it.

> Ok. All of the in-tree 83xx device trees have 5 interrupts listed. With
> the changes described above, we'll only call request_irq() once in that
> case, and use the per-controller interrupt.
> 
> I'll leave the documentation alone, with the exception of marking the
> per-controller interrupt optional.

Hmm, that description is specific to 83xx, and such chips really should 
have the controller interrupt specified.  The per-channel interrupt 
should not be optional, though.

-Scott

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: fsldma: cleanup driver and fix async_tx compatibility
  2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
                   ` (8 preceding siblings ...)
  2010-01-05  6:08 ` fsldma: cleanup driver and fix async_tx compatibility Dudhat Dipen-B09055
@ 2010-01-11  5:47 ` Dudhat Dipen-B09055
  2010-01-11 16:29   ` Ira W. Snyder
  9 siblings, 1 reply; 22+ messages in thread
From: Dudhat Dipen-B09055 @ 2010-01-11  5:47 UTC (permalink / raw)
  To: Dudhat Dipen-B09055, Ira W. Snyder, dan.j.williams
  Cc: herbert, Suresh Vishnu-B05022, Tabi Timur-B04825, linuxppc-dev,
	Gupta Maneesh-B18878, Li Yang-R58472


Hi Ira,

I have tested your patches with async DMA memcpy support. Though I
haven't captured the improvement figures.
It works fine for RAID5 memcpy offload, as interrupts are coming on
separate DMA channels while I ran IOZONE on the RAID partition.

Regards,
 Dipen
 

-----Original Message-----
From: Dudhat Dipen-B09055
Sent: Tuesday, January 05, 2010 11:38 AM
To: 'Ira W. Snyder'; dan.j.williams@intel.com
Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Gupta
Maneesh-B18878; Li Yang-R58472
Subject: RE: fsldma: cleanup driver and fix async_tx compatibility


Hi Ira,

I will test it on 85xx hardware and let you know once done.

Thanks
Dipen
 

-----Original Message-----
From: Ira W. Snyder [mailto:iws@ovro.caltech.edu]
Sent: Friday, January 01, 2010 11:41 AM
To: dan.j.williams@intel.com
Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Dudhat
Dipen-B09055; Gupta Maneesh-B18878; Li Yang-R58472
Subject: fsldma: cleanup driver and fix async_tx compatibility

This patch series cleans up the Freescale DMAEngine driver, including
verifying the locking and making sure that all code paths are correct.
There were a few places that seemed suspicious, and they have been
fixed.

I have written a quick memory->memory DMAEngine test driver, and the
performance is identical before and after my changes (<0.1% change). I
measured both setting up the DMA operation (via
device_prep_dma_interrupt() and device_prep_dma_memcpy()) and the actual
DMA transfer itself.

As an added bonus, the interrupt load is measurably reduced. My test
driver transfers 32MB as 32x 1MB chunks + 1 interrupt descriptor, using
the functions noted above. Previous to this patch series, 31 interrupts
were generated. After this patch series, only a single interrupt is
generated for the whole transaction.

Some testing on 85xx/86xx hardware would be appreciated. Also, some
testing by the users attempting to use async_tx and talitos to handle
RAID offload would be great as well.

 Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +-
 drivers/dma/fsldma.c                           | 1036
++++++++++++------------
 drivers/dma/fsldma.h                           |   35 +-
 3 files changed, 556 insertions(+), 532 deletions(-)

Thanks,
Ira

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: fsldma: cleanup driver and fix async_tx compatibility
  2010-01-11  5:47 ` Dudhat Dipen-B09055
@ 2010-01-11 16:29   ` Ira W. Snyder
  2010-02-02  7:50     ` Dudhat Dipen-B09055
  0 siblings, 1 reply; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-11 16:29 UTC (permalink / raw)
  To: Dudhat Dipen-B09055
  Cc: herbert, Suresh Vishnu-B05022, Tabi Timur-B04825, linuxppc-dev,
	dan.j.williams, Gupta Maneesh-B18878, Li Yang-R58472

On Mon, Jan 11, 2010 at 11:17:04AM +0530, Dudhat Dipen-B09055 wrote:
> 
> Hi Ira,
> 
> I have tested your patches with async DMA memcpy support. Though I
> haven't captured the improvement figures.
> It works fine for RAID5 memcpy offload as interrupts are coming for
> separate DMA channels while I have ran IOZONE onto RAID partition.
> 

Excellent, thanks for running these tests. I'm glad to hear that the
RAID offload is working now.

You shouldn't notice any difference in performance. On a 32MB memcpy
operation, broken into 32x 1MB memcpy(), 1x interrupt(), I noticed less
than 0.1% difference (approx 100,000 ns / 0.1ms). This is probably at or
near the limits of my measurement accuracy.

Ira


> Regards,
>  Dipen
>  
> 
> -----Original Message-----
> From: Dudhat Dipen-B09055 
> Sent: Tuesday, January 05, 2010 11:38 AM
> To: 'Ira W. Snyder'; dan.j.williams@intel.com
> Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
> Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Gupta
> Maneesh-B18878; Li Yang-R58472
> Subject: RE: fsldma: cleanup driver and fix async_tx compatibility
> 
> 
> Hi Ira,
> 
> I will test it on 85xx hardware and let you know once done.
> 
> Thanks
> Dipen
>  
> 
> -----Original Message-----
> From: Ira W. Snyder [mailto:iws@ovro.caltech.edu]
> Sent: Friday, January 01, 2010 11:41 AM
> To: dan.j.williams@intel.com
> Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
> Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Dudhat
> Dipen-B09055; Gupta Maneesh-B18878; Li Yang-R58472
> Subject: fsldma: cleanup driver and fix async_tx compatibility
> 
> This patch series cleans up the Freescale DMAEngine driver, including
> verifying the locking and making sure that all code paths are correct.
> There were a few places that seemed suspicious, and they have been
> fixed.
> 
> I have written a quick memory->memory DMAEngine test driver, and the
> performance is identical before and after my changes (<0.1% change). I
> measured both setting up the DMA operation (via
> device_prep_dma_interrupt() and device_prep_dma_memcpy()) and the actual
> DMA transfer itself.
> 
> As an added bonus, the interrupt load is measurably reduced. My test
> driver transfers 32MB as 32x 1MB chunks + 1 interrupt descriptor, using
> the functions noted above. Previous to this patch series, 31 interrupts
> were generated. After this patch series, only a single interrupt is
> generated for the whole transaction.
> 
> Some testing on 85xx/86xx hardware would be appreciated. Also, some
> testing by the users attempting to use async_tx and talitos to handle
> RAID offload would be great as well.
> 
>  Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +-
>  drivers/dma/fsldma.c                           | 1036
> ++++++++++++------------
>  drivers/dma/fsldma.h                           |   35 +-
>  3 files changed, 556 insertions(+), 532 deletions(-)
> 
> Thanks,
> Ira
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: fsldma: cleanup driver and fix async_tx compatibility
  2010-01-11 16:29   ` Ira W. Snyder
@ 2010-02-02  7:50     ` Dudhat Dipen-B09055
  2010-02-02 15:04       ` Dan Williams
  0 siblings, 1 reply; 22+ messages in thread
From: Dudhat Dipen-B09055 @ 2010-02-02  7:50 UTC (permalink / raw)
  To: Ira W. Snyder
  Cc: herbert, Suresh Vishnu-B05022, Tabi Timur-B04825, linuxppc-dev,
	dan.j.williams, Gupta Maneesh-B18878, Li Yang-R58472

 
Hi Ira,

Have these patches been accepted into the upstream kernel?

- Dipen

-----Original Message-----
From: Ira W. Snyder [mailto:iws@ovro.caltech.edu]
Sent: Monday, January 11, 2010 9:59 PM
To: Dudhat Dipen-B09055
Cc: dan.j.williams@intel.com; galak@kernel.crashing.org;
herbert@gondor.apana.org.au; Tabi Timur-B04825; linuxppc-dev@ozlabs.org;
Suresh Vishnu-B05022; Gupta Maneesh-B18878; Li Yang-R58472
Subject: Re: fsldma: cleanup driver and fix async_tx compatibility

On Mon, Jan 11, 2010 at 11:17:04AM +0530, Dudhat Dipen-B09055 wrote:
> 
> Hi Ira,
> 
> I have tested your patches with async DMA memcpy support. Though I
> haven't captured the improvement figures.
> It works fine for RAID5 memcpy offload as interrupts are coming for
> separate DMA channels while I have ran IOZONE onto RAID partition.
> 

Excellent, thanks for running these tests. I'm glad to hear that the
RAID offload is working now.

You shouldn't notice any difference in performance. On a 32MB memcpy
operation, broken into 32x 1MB memcpy(), 1x interrupt(), I noticed less
than 0.1% difference (approx 100,000 ns / 0.1ms). This is probably at or
near the limits of my measurement accuracy.

Ira


> Regards,
>  Dipen
> 
> 
> -----Original Message-----
> From: Dudhat Dipen-B09055
> Sent: Tuesday, January 05, 2010 11:38 AM
> To: 'Ira W. Snyder'; dan.j.williams@intel.com
> Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
> Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Gupta
> Maneesh-B18878; Li Yang-R58472
> Subject: RE: fsldma: cleanup driver and fix async_tx compatibility
> 
> 
> Hi Ira,
> 
> I will test it on 85xx hardware and let you know once done.
> 
> Thanks
> Dipen
> 
> 
> -----Original Message-----
> From: Ira W. Snyder [mailto:iws@ovro.caltech.edu]
> Sent: Friday, January 01, 2010 11:41 AM
> To: dan.j.williams@intel.com
> Cc: galak@kernel.crashing.org; herbert@gondor.apana.org.au; Tabi
> Timur-B04825; linuxppc-dev@ozlabs.org; Suresh Vishnu-B05022; Dudhat
> Dipen-B09055; Gupta Maneesh-B18878; Li Yang-R58472
> Subject: fsldma: cleanup driver and fix async_tx compatibility
> 
> This patch series cleans up the Freescale DMAEngine driver, including
> verifying the locking and making sure that all code paths are correct.
> There were a few places that seemed suspicious, and they have been
> fixed.
> 
> I have written a quick memory->memory DMAEngine test driver, and the
> performance is identical before and after my changes (<0.1% change). I
> measured both setting up the DMA operation (via
> device_prep_dma_interrupt() and device_prep_dma_memcpy()) and the
> actual DMA transfer itself.
> 
> As an added bonus, the interrupt load is measurably reduced. My test
> driver transfers 32MB as 32x 1MB chunks + 1 interrupt descriptor,
> using the functions noted above. Previous to this patch series, 31
> interrupts were generated. After this patch series, only a single
> interrupt is generated for the whole transaction.
> 
> Some testing on 85xx/86xx hardware would be appreciated. Also, some
> testing by the users attempting to use async_tx and talitos to handle
> RAID offload would be great as well.
> 
>  Documentation/powerpc/dts-bindings/fsl/dma.txt |   17 +-
>  drivers/dma/fsldma.c                           | 1036
> ++++++++++++------------
>  drivers/dma/fsldma.h                           |   35 +-
>  3 files changed, 556 insertions(+), 532 deletions(-)
> 
> Thanks,
> Ira
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: fsldma: cleanup driver and fix async_tx compatibility
  2010-02-02  7:50     ` Dudhat Dipen-B09055
@ 2010-02-02 15:04       ` Dan Williams
  0 siblings, 0 replies; 22+ messages in thread
From: Dan Williams @ 2010-02-02 15:04 UTC (permalink / raw)
  To: Dudhat Dipen-B09055
  Cc: herbert, Ira W. Snyder, Suresh Vishnu-B05022, Tabi Timur-B04825,
	linuxppc-dev, Gupta Maneesh-B18878, Li Yang-R58472

Dudhat Dipen-B09055 wrote:
>  
> Hi Ira,
> 
> Do these patches accepted to open source kernel??

Yes, they should appear on the 'next' branch of async_tx.git by the end 
of the day.

--
Dan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 1/8] fsldma: reduce kernel text size
  2010-01-06 23:33 [PATCH 0/8 v2] " Ira W. Snyder
@ 2010-01-06 23:33 ` Ira W. Snyder
  0 siblings, 0 replies; 22+ messages in thread
From: Ira W. Snyder @ 2010-01-06 23:33 UTC (permalink / raw)
  To: dan.j.williams
  Cc: R58472, B04825, linuxppc-dev, scottwood, Dipen.Dudhat,
	Maneesh.Gupta, herbert

Some of the functions are written in a way where they use multiple reads
and writes where a single read/write pair could suffice. This shrinks the
kernel text size measurably, while making the functions easier to
understand.

add/remove: 0/0 grow/shrink: 1/4 up/down: 4/-196 (-192)
function                                     old     new   delta
fsl_chan_set_request_count                   120     124      +4
dma_halt                                     300     272     -28
fsl_chan_set_src_loop_size                   208     156     -52
fsl_chan_set_dest_loop_size                  208     156     -52
fsl_chan_xfer_ld_queue                       500     436     -64

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
---
 drivers/dma/fsldma.c |   83 +++++++++++++++++++++++++++-----------------------
 1 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e7..0bad741 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -143,43 +143,45 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 
 static void dma_start(struct fsl_dma_chan *fsl_chan)
 {
-	u32 mr_set = 0;
-
-	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
-		mr_set |= FSL_DMA_MR_EMP_EN;
-	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-				& ~FSL_DMA_MR_EMP_EN, 32);
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
+	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+			DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
+			mode |= FSL_DMA_MR_EMP_EN;
+		} else {
+			mode &= ~FSL_DMA_MR_EMP_EN;
+		}
 	}
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
-		mr_set |= FSL_DMA_MR_EMS_EN;
+		mode |= FSL_DMA_MR_EMS_EN;
 	else
-		mr_set |= FSL_DMA_MR_CS;
+		mode |= FSL_DMA_MR_CS;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| mr_set, 32);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 static void dma_halt(struct fsl_dma_chan *fsl_chan)
 {
+	u32 mode;
 	int i;
 
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
-		32);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
-		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode |= FSL_DMA_MR_CA;
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
+
+	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(fsl_chan))
 			break;
 		udelay(10);
 	}
+
 	if (i >= 100 && !dma_is_idle(fsl_chan))
 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
 }
@@ -231,22 +233,23 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  */
 static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_SAHE), 32);
+		mode &= ~FSL_DMA_MR_SAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
-			32);
+		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
 		break;
 	}
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
@@ -262,22 +265,23 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  */
 static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_DAHE), 32);
+		mode &= ~FSL_DMA_MR_DAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
-			32);
+		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
 		break;
 	}
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
@@ -294,11 +298,14 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  */
 static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
 {
+	u32 mode;
+
 	BUG_ON(size > 1024);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| ((__ilog2(size) << 24) & 0x0f000000),
-		32);
+
+	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
+	mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
 }
 
 /**
-- 
1.5.4.3

^ permalink raw reply related	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2010-02-02 15:14 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-01-01  6:10 fsldma: cleanup driver and fix async_tx compatibility Ira W. Snyder
2010-01-01  6:10 ` [PATCH 1/8] fsldma: reduce kernel text size Ira W. Snyder
2010-01-01  6:10 ` [PATCH 2/8] fsldma: remove unused structure members Ira W. Snyder
2010-01-01  6:10 ` [PATCH 3/8] fsldma: rename struct fsl_dma_chan to struct fsldma_chan Ira W. Snyder
2010-01-01  6:10 ` [PATCH 4/8] fsldma: rename dest to dst for uniformity Ira W. Snyder
2010-01-01  6:10 ` [PATCH 5/8] fsldma: clean up the OF subsystem routines Ira W. Snyder
2010-01-01  6:10 ` [PATCH 6/8] fsldma: simplify IRQ probing and handling Ira W. Snyder
2010-01-06 18:02   ` Scott Wood
2010-01-06 18:39     ` Ira W. Snyder
2010-01-06 20:51       ` Scott Wood
2010-01-01  6:10 ` [PATCH 7/8] fsldma: rename fsl_chan to fchan Ira W. Snyder
2010-01-06 18:04   ` Scott Wood
2010-01-06 18:19     ` Ira W. Snyder
2010-01-06 18:27       ` Scott Wood
2010-01-06 18:40         ` Ira W. Snyder
2010-01-01  6:10 ` [PATCH 8/8] fsldma: major cleanups and fixes Ira W. Snyder
2010-01-05  6:08 ` fsldma: cleanup driver and fix async_tx compatibility Dudhat Dipen-B09055
2010-01-11  5:47 ` Dudhat Dipen-B09055
2010-01-11 16:29   ` Ira W. Snyder
2010-02-02  7:50     ` Dudhat Dipen-B09055
2010-02-02 15:04       ` Dan Williams
2010-01-06 23:33 [PATCH 0/8 v2] " Ira W. Snyder
2010-01-06 23:33 ` [PATCH 1/8] fsldma: reduce kernel text size Ira W. Snyder

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).