linux-kernel.vger.kernel.org archive mirror
* [PATCH v5 0/3] dma: imx-sdma: add support for sdma memory copy
@ 2014-10-23  2:22 Robin Gong
  2014-10-23  2:22 ` [PATCH v5 1/3] " Robin Gong
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Robin Gong @ 2014-10-23  2:22 UTC (permalink / raw)
  To: vinod.koul, dan.j.williams, andriy.shevchenko
  Cc: dmaengine, linux-kernel, b38343

Add a memory copy interface to the sdma driver. The patch set is based
on v3:
http://www.spinics.net/lists/dmaengine/msg00850.html

Changes from v4:
1. Address comments from Andy Shevchenko.

Changes from v3:
1. Split two patches out of the v3 patch to address Vinod's comments.

Changes from v2:
1. Remove the redundant check for bus width.

Changes from v1:
1. Correct some printk formats, such as %pad for dma_addr_t.
2. Split the duplicated code out of prep_dma_memcpy and prep_dma_sg to make
   the code cleaner.


Robin Gong (3):
  dma: imx-sdma: add support for sdma memory copy
  dma: imx-sdma: correct print format
  dma: imx-sdma: reorg code to make code clean

 drivers/dma/imx-sdma.c | 247 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 190 insertions(+), 57 deletions(-)

-- 
1.9.1



* [PATCH v5 1/3] dma: imx-sdma: add support for sdma memory copy
  2014-10-23  2:22 [PATCH v5 0/3] dma: imx-sdma: add support for sdma memory copy Robin Gong
@ 2014-10-23  2:22 ` Robin Gong
  2014-12-05 16:39   ` Vinod Koul
  2014-10-23  2:22 ` [PATCH v5 2/3] dma: imx-sdma: correct the printk format Robin Gong
  2014-10-23  2:22 ` [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean Robin Gong
  2 siblings, 1 reply; 9+ messages in thread
From: Robin Gong @ 2014-10-23  2:22 UTC (permalink / raw)
  To: vinod.koul, dan.j.williams, andriy.shevchenko
  Cc: dmaengine, linux-kernel, b38343

Add the device_prep_dma_memcpy and device_prep_dma_sg common interfaces
to the imx-sdma driver to support memory copy in two ways: contiguous
memory buffers or scatter lists.
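
For illustration only (not part of this patch), a minimal client-side sketch of
how the new DMA_MEMCPY capability would be consumed through the generic
dmaengine API. The function name, the busy-wait on dma_sync_wait() and the
error handling are simplifying assumptions; kernels that predate the
dmaengine_prep_dma_memcpy() wrapper would call the channel's
device_prep_dma_memcpy callback directly.

#include <linux/dmaengine.h>

static int example_sdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* grab any channel advertising DMA_MEMCPY, e.g. an SDMA channel */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);	/* queue the descriptor */
	dma_async_issue_pending(chan);		/* start the transfer */

	/* simplest completion strategy: poll the cookie until it is done */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}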

Signed-off-by: Robin Gong <b38343@freescale.com>
---
 drivers/dma/imx-sdma.c | 188 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 164 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f7626e3..5424d9a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -229,6 +229,7 @@ struct sdma_context_data {
 } __attribute__ ((packed));
 
 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
+#define SDMA_BD_MAX_CNT	0xfffc /* align with 4 bytes */
 
 struct sdma_engine;
 
@@ -261,6 +262,7 @@ struct sdma_channel {
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned long			flags;
 	dma_addr_t			per_address;
+	unsigned int                    pc_to_pc;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -701,6 +703,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->pc_to_pc = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -775,6 +778,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->pc_to_pc = emi_2_emi;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -787,11 +791,12 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_MEM_TO_MEM)
+		load_address = sdmac->pc_to_pc;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -1021,16 +1026,118 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	clk_disable(sdma->clk_ahb);
 }
 
-static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	size_t count;
+	int i = 0, param, ret;
+	struct sdma_buffer_descriptor *bd;
+
+	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
+		return NULL;
+
+	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
+		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n",
+			channel, len, NUM_BD * SDMA_BD_MAX_CNT);
+		goto err_out;
+	}
+
+	sdmac->status = DMA_IN_PROGRESS;
+
+	sdmac->buf_tail = 0;
+
+	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
+		dma_src, dma_dst, len, channel);
+
+	sdmac->direction = DMA_MEM_TO_MEM;
+
+	ret = sdma_load_context(sdmac);
+	if (ret)
+		goto err_out;
+
+	sdmac->chn_count = 0;
+
+	do {
+		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+		bd = &sdmac->bd[i];
+		bd->buffer_addr = dma_src;
+		bd->ext_buffer_addr = dma_dst;
+		bd->mode.count = count;
+
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
+			ret =  -EINVAL;
+			goto err_out;
+		}
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
+			bd->mode.command = 0;
+			if ((count | dma_dst | dma_src) & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if ((count | dma_dst | dma_src) & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
+
+		dma_src += count;
+		dma_dst += count;
+		len -= count;
+		i++;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+		/* last bd */
+		if (!len) {
+			param |= BD_INTR;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
+		}
+
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, count, bd->buffer_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+		sdmac->chn_count += count;
+	} while (len);
+
+	sdmac->num_bd = i;
+	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+	return &sdmac->desc;
+err_out:
+	sdmac->status = DMA_ERROR;
+	return NULL;
+}
+
+/*
+ * Please ensure that dst_nents is no smaller than src_nents, and that each
+ * dst_sg entry is no smaller than the corresponding src_sg entry. To simplify
+ * things, please use the same layout for dst_sg as for src_sg.
+ */
+static struct dma_async_tx_descriptor *sdma_prep_sg(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		enum dma_transfer_direction direction)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
 	int channel = sdmac->channel;
-	struct scatterlist *sg;
+	struct scatterlist *sg_src = src_sg, *sg_dst = dst_sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
 		return NULL;
@@ -1041,32 +1148,38 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	sdmac->buf_tail = 0;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
-			sg_len, channel);
+			src_nents, channel);
 
 	sdmac->direction = direction;
+
 	ret = sdma_load_context(sdmac);
 	if (ret)
 		goto err_out;
 
-	if (sg_len > NUM_BD) {
+	if (src_nents > NUM_BD) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
+				channel, src_nents, NUM_BD);
 		ret = -EINVAL;
 		goto err_out;
 	}
 
 	sdmac->chn_count = 0;
-	for_each_sg(sgl, sg, sg_len, i) {
+	for_each_sg(src_sg, sg_src, src_nents, i) {
 		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 		int param;
 
-		bd->buffer_addr = sg->dma_address;
+		bd->buffer_addr = sg_src->dma_address;
+
+		if (direction == DMA_MEM_TO_MEM) {
+			BUG_ON(!sg_dst);
+			bd->ext_buffer_addr = sg_dst->dma_address;
+		}
 
-		count = sg_dma_len(sg);
+		count = sg_dma_len(sg_src);
 
-		if (count > 0xffff) {
+		if (count > SDMA_BD_MAX_CNT) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
-					channel, count, 0xffff);
+					channel, count, SDMA_BD_MAX_CNT);
 			ret = -EINVAL;
 			goto err_out;
 		}
@@ -1082,12 +1195,14 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		switch (sdmac->word_size) {
 		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-			if (count & 3 || sg->dma_address & 3)
+			if ((count | sg_src->dma_address | (sg_dst &&
+				(sg_dst->dma_address))) & 3)
 				return NULL;
 			break;
 		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 			bd->mode.command = 2;
-			if (count & 1 || sg->dma_address & 1)
+			if ((count | sg_src->dma_address |
+				(sg_dst && (sg_dst->dma_address))) & 1)
 				return NULL;
 			break;
 		case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -1099,21 +1214,23 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (i + 1 == sg_len) {
+		if (i + 1 == src_nents) {
 			param |= BD_INTR;
 			param |= BD_LAST;
 			param &= ~BD_CONT;
 		}
 
-		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
-				i, count, (u64)sg->dma_address,
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+				i, count, sg_src->dma_address,
 				param & BD_WRAP ? "wrap" : "",
 				param & BD_INTR ? " intr" : "");
 
 		bd->mode.status = param;
+		if (direction == DMA_MEM_TO_MEM)
+			sg_dst = sg_next(sg_dst);
 	}
 
-	sdmac->num_bd = sg_len;
+	sdmac->num_bd = src_nents;
 	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
 	return &sdmac->desc;
@@ -1122,6 +1239,24 @@ err_out:
 	return NULL;
 }
 
+static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags)
+{
+	return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
+			   DMA_MEM_TO_MEM);
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
+}
+
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
@@ -1155,9 +1290,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		goto err_out;
 	}
 
-	if (period_len > 0xffff) {
+	if (period_len > SDMA_BD_MAX_CNT) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
-				channel, period_len, 0xffff);
+				channel, period_len, SDMA_BD_MAX_CNT);
 		goto err_out;
 	}
 
@@ -1218,6 +1353,8 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 						dmaengine_cfg->src_addr_width;
 			sdmac->word_size = dmaengine_cfg->src_addr_width;
+		} else if (dmaengine_cfg->direction == DMA_MEM_TO_MEM) {
+			sdmac->word_size = dmaengine_cfg->dst_addr_width;
 		} else {
 			sdmac->per_address = dmaengine_cfg->dst_addr;
 			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1536,6 +1673,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
@@ -1598,6 +1736,8 @@ static int __init sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_tx_status = sdma_tx_status;
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
+	sdma->dma_device.device_prep_dma_sg = sdma_prep_memcpy_sg;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
-- 
1.9.1



* [PATCH v5 2/3] dma: imx-sdma: correct the printk format
  2014-10-23  2:22 [PATCH v5 0/3] dma: imx-sdma: add support for sdma memory copy Robin Gong
  2014-10-23  2:22 ` [PATCH v5 1/3] " Robin Gong
@ 2014-10-23  2:22 ` Robin Gong
  2014-10-23  2:22 ` [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean Robin Gong
  2 siblings, 0 replies; 9+ messages in thread
From: Robin Gong @ 2014-10-23  2:22 UTC (permalink / raw)
  To: vinod.koul, dan.j.williams, andriy.shevchenko
  Cc: dmaengine, linux-kernel, b38343

Correct the printk formats for 'size_t', 'dma_addr_t', etc.
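
For reference, a hedged minimal sketch of the specifiers involved (dev, addr
and len are placeholder names, not taken from the driver):

#include <linux/device.h>
#include <linux/types.h>

static void example_print(struct device *dev, dma_addr_t addr, size_t len)
{
	dev_dbg(dev, "len=%zu\n", len);		/* size_t wants %zu */
	dev_dbg(dev, "addr=%pad\n", &addr);	/* dma_addr_t wants %pad; pass it by reference */
}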

Signed-off-by: Robin Gong <b38343@freescale.com>
---
 drivers/dma/imx-sdma.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5424d9a..377bb18 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1041,7 +1041,7 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 		return NULL;
 
 	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
-		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n",
+		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%zu > %d\n",
 			channel, len, NUM_BD * SDMA_BD_MAX_CNT);
 		goto err_out;
 	}
@@ -1050,8 +1050,8 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 
 	sdmac->buf_tail = 0;
 
-	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
-		dma_src, dma_dst, len, channel);
+	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+		&dma_src, &dma_dst, len, channel);
 
 	sdmac->direction = DMA_MEM_TO_MEM;
 
@@ -1104,7 +1104,7 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 			param &= ~BD_CONT;
 		}
 
-		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%u %s%s\n",
 				i, count, bd->buffer_addr,
 				param & BD_WRAP ? "wrap" : "",
 				param & BD_INTR ? " intr" : "");
@@ -1220,8 +1220,8 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
 			param &= ~BD_CONT;
 		}
 
-		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
-				i, count, sg_src->dma_address,
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%pad %s%s\n",
+				i, count, &sg_src->dma_address,
 				param & BD_WRAP ? "wrap" : "",
 				param & BD_INTR ? " intr" : "");
 
@@ -1291,7 +1291,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	}
 
 	if (period_len > SDMA_BD_MAX_CNT) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
+		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
 				channel, period_len, SDMA_BD_MAX_CNT);
 		goto err_out;
 	}
@@ -1315,8 +1315,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		if (i + 1 == num_periods)
 			param |= BD_WRAP;
 
-		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
-				i, period_len, (u64)dma_addr,
+		dev_dbg(sdma->dev, "entry %d: count: %d dma: %pad %s%s\n",
+				i, period_len, &dma_addr,
 				param & BD_WRAP ? "wrap" : "",
 				param & BD_INTR ? " intr" : "");
 
-- 
1.9.1



* [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean
  2014-10-23  2:22 [PATCH v5 0/3] dma: imx-sdma: add support for sdma memory copy Robin Gong
  2014-10-23  2:22 ` [PATCH v5 1/3] " Robin Gong
  2014-10-23  2:22 ` [PATCH v5 2/3] dma: imx-sdma: correct the printk format Robin Gong
@ 2014-10-23  2:22 ` Robin Gong
  2014-12-05 16:41   ` Vinod Koul
  2 siblings, 1 reply; 9+ messages in thread
From: Robin Gong @ 2014-10-23  2:22 UTC (permalink / raw)
  To: vinod.koul, dan.j.williams, andriy.shevchenko
  Cc: dmaengine, linux-kernel, b38343

Code reorg for transfer prepare and bus width check to make code
cleaner.

Signed-off-by: Robin Gong <b38343@freescale.com>
---
 drivers/dma/imx-sdma.c | 127 +++++++++++++++++++++++--------------------------
 1 file changed, 60 insertions(+), 67 deletions(-)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 377bb18..f2dee57 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1026,6 +1026,52 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	clk_disable(sdma->clk_ahb);
 }
 
+static int sdma_transfer_init(struct sdma_channel *sdmac,
+			      enum dma_transfer_direction direction)
+{
+	int ret = 0;
+
+	sdmac->status = DMA_IN_PROGRESS;
+	sdmac->buf_tail = 0;
+	sdmac->flags = 0;
+	sdmac->direction = direction;
+
+	ret = sdma_load_context(sdmac);
+	if (ret)
+		return ret;
+
+	sdmac->chn_count = 0;
+
+	return ret;
+}
+
+static int check_bd_buswidth(struct sdma_buffer_descriptor *bd,
+			     struct sdma_channel *sdmac, int count,
+			     dma_addr_t dma_dst, dma_addr_t dma_src)
+{
+	int ret = 0;
+
+	switch (sdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		bd->mode.command = 0;
+		if ((count | dma_dst | dma_src) & 3)
+			ret = -EINVAL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		bd->mode.command = 2;
+		if ((count | dma_dst | dma_src) & 1)
+			ret = -EINVAL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		 bd->mode.command = 1;
+		 break;
+	default:
+		 return -EINVAL;
+	}
+
+	return ret;
+}
+
 static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 		struct dma_chan *chan, dma_addr_t dma_dst,
 		dma_addr_t dma_src, size_t len, unsigned long flags)
@@ -1034,7 +1080,7 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 	size_t count;
-	int i = 0, param, ret;
+	int i = 0, param;
 	struct sdma_buffer_descriptor *bd;
 
 	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
@@ -1046,21 +1092,12 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 		goto err_out;
 	}
 
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->buf_tail = 0;
-
 	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
 		&dma_src, &dma_dst, len, channel);
 
-	sdmac->direction = DMA_MEM_TO_MEM;
-
-	ret = sdma_load_context(sdmac);
-	if (ret)
+	if (sdma_transfer_init(sdmac, DMA_MEM_TO_MEM))
 		goto err_out;
 
-	sdmac->chn_count = 0;
-
 	do {
 		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
 		bd = &sdmac->bd[i];
@@ -1068,28 +1105,8 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
 		bd->ext_buffer_addr = dma_dst;
 		bd->mode.count = count;
 
-		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
-			ret =  -EINVAL;
+		if (check_bd_buswidth(bd, sdmac, count, dma_dst, dma_src))
 			goto err_out;
-		}
-
-		switch (sdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			bd->mode.command = 0;
-			if ((count | dma_dst | dma_src) & 3)
-				return NULL;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			bd->mode.command = 2;
-			if ((count | dma_dst | dma_src) & 1)
-				return NULL;
-			break;
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			bd->mode.command = 1;
-			break;
-		default:
-			return NULL;
-		}
 
 		dma_src += count;
 		dma_dst += count;
@@ -1141,21 +1158,10 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
 
 	if (sdmac->status == DMA_IN_PROGRESS)
 		return NULL;
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->flags = 0;
-
-	sdmac->buf_tail = 0;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			src_nents, channel);
 
-	sdmac->direction = direction;
-
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
-
 	if (src_nents > NUM_BD) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
 				channel, src_nents, NUM_BD);
@@ -1163,7 +1169,9 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
 		goto err_out;
 	}
 
-	sdmac->chn_count = 0;
+	if (sdma_transfer_init(sdmac, direction))
+		goto err_out;
+
 	for_each_sg(src_sg, sg_src, src_nents, i) {
 		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 		int param;
@@ -1187,30 +1195,15 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
 		bd->mode.count = count;
 		sdmac->chn_count += count;
 
-		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
-			ret =  -EINVAL;
+		if (direction == DMA_MEM_TO_MEM)
+			ret = check_bd_buswidth(bd, sdmac, count,
+						sg_dst->dma_address,
+						sg_src->dma_address);
+		else
+			ret = check_bd_buswidth(bd, sdmac, count, 0,
+						sg_src->dma_address);
+		if (ret)
 			goto err_out;
-		}
-
-		switch (sdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			bd->mode.command = 0;
-			if ((count | sg_src->dma_address | (sg_dst &&
-				(sg_dst->dma_address))) & 3)
-				return NULL;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			bd->mode.command = 2;
-			if ((count | sg_src->dma_address |
-				(sg_dst && (sg_dst->dma_address))) & 1)
-				return NULL;
-			break;
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			bd->mode.command = 1;
-			break;
-		default:
-			return NULL;
-		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-- 
1.9.1



* Re: [PATCH v5 1/3] dma: imx-sdma: add support for sdma memory copy
  2014-10-23  2:22 ` [PATCH v5 1/3] " Robin Gong
@ 2014-12-05 16:39   ` Vinod Koul
  2015-01-13  3:55     ` Robin Gong
  0 siblings, 1 reply; 9+ messages in thread
From: Vinod Koul @ 2014-12-05 16:39 UTC (permalink / raw)
  To: Robin Gong; +Cc: dan.j.williams, andriy.shevchenko, dmaengine, linux-kernel

On Thu, Oct 23, 2014 at 10:22:18AM +0800, Robin Gong wrote:
 
> -static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> -		struct dma_chan *chan, struct scatterlist *sgl,
> -		unsigned int sg_len, enum dma_transfer_direction direction,
> -		unsigned long flags, void *context)
> +static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> +		struct dma_chan *chan, dma_addr_t dma_dst,
> +		dma_addr_t dma_src, size_t len, unsigned long flags)
> +{
> +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> +	struct sdma_engine *sdma = sdmac->sdma;
> +	int channel = sdmac->channel;
> +	size_t count;
> +	int i = 0, param, ret;
> +	struct sdma_buffer_descriptor *bd;
> +
> +	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
> +		return NULL;
Why is this dependent on status? You can prepare a descriptor here!
> +
> +	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
> +		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n",
> +			channel, len, NUM_BD * SDMA_BD_MAX_CNT);
> +		goto err_out;
> +	}
> +
> +	sdmac->status = DMA_IN_PROGRESS;
??

> +
> +	sdmac->buf_tail = 0;
> +
> +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
> +		dma_src, dma_dst, len, channel);
> +
> +	sdmac->direction = DMA_MEM_TO_MEM;
> +
> +	ret = sdma_load_context(sdmac);
> +	if (ret)
> +		goto err_out;
> +
> +	sdmac->chn_count = 0;
> +
> +	do {
> +		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
> +		bd = &sdmac->bd[i];
> +		bd->buffer_addr = dma_src;
> +		bd->ext_buffer_addr = dma_dst;
> +		bd->mode.count = count;
> +
> +		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
> +			ret =  -EINVAL;
> +			goto err_out;
> +		}
> +
> +		switch (sdmac->word_size) {
> +		case DMA_SLAVE_BUSWIDTH_4_BYTES:
So are you dependent on dma_slave_config being set? Then it is wrong; for
memcpy you shouldn't be!
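
(For context, a minimal sketch of what a pure memcpy client does: it never
calls dmaengine_slave_config(), so word_size cannot be assumed to be set here.
The names chan, dst, src and len are placeholders.)

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}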

>  		switch (sdmac->word_size) {
>  		case DMA_SLAVE_BUSWIDTH_4_BYTES:
>  			bd->mode.command = 0;
> -			if (count & 3 || sg->dma_address & 3)
> +			if ((count | sg_src->dma_address | (sg_dst &&
> +				(sg_dst->dma_address))) & 3)
>  				return NULL;
>  			break;
>  		case DMA_SLAVE_BUSWIDTH_2_BYTES:
>  			bd->mode.command = 2;
> -			if (count & 1 || sg->dma_address & 1)
> +			if ((count | sg_src->dma_address |
> +				(sg_dst && (sg_dst->dma_address))) & 1)
>  				return NULL;
This doesn't seem to have anything to do with memcpy; shouldn't this be an
independent change?

>  			break;
>  		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> @@ -1099,21 +1214,23 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
>  
>  		param = BD_DONE | BD_EXTD | BD_CONT;
>  
> -		if (i + 1 == sg_len) {
> +		if (i + 1 == src_nents) {
>  			param |= BD_INTR;
>  			param |= BD_LAST;
>  			param &= ~BD_CONT;
>  		}
>  
> -		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
> -				i, count, (u64)sg->dma_address,
> +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> +				i, count, sg_src->dma_address,
>  				param & BD_WRAP ? "wrap" : "",
>  				param & BD_INTR ? " intr" : "");
ditto

>  
>  		bd->mode.status = param;
> +		if (direction == DMA_MEM_TO_MEM)
> +			sg_dst = sg_next(sg_dst);
>  	}
>  
> -	sdmac->num_bd = sg_len;
> +	sdmac->num_bd = src_nents;
>  	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
>  
>  	return &sdmac->desc;
> @@ -1122,6 +1239,24 @@ err_out:
>  	return NULL;
>  }
>  
> +static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
> +		struct dma_chan *chan,
> +		struct scatterlist *dst_sg, unsigned int dst_nents,
> +		struct scatterlist *src_sg, unsigned int src_nents,
> +		unsigned long flags)
> +{
> +	return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
> +			   DMA_MEM_TO_MEM);
> +}
> +
> +static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> +		struct dma_chan *chan, struct scatterlist *sgl,
> +		unsigned int sg_len, enum dma_transfer_direction direction,
> +		unsigned long flags, void *context)
> +{
> +	return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
> +}
you should have done this first and then added memcpy

-- 
~Vinod



* Re: [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean
  2014-10-23  2:22 ` [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean Robin Gong
@ 2014-12-05 16:41   ` Vinod Koul
  2015-01-13  3:55     ` Robin Gong
  0 siblings, 1 reply; 9+ messages in thread
From: Vinod Koul @ 2014-12-05 16:41 UTC (permalink / raw)
  To: Robin Gong; +Cc: dan.j.williams, andriy.shevchenko, dmaengine, linux-kernel

On Thu, Oct 23, 2014 at 10:22:20AM +0800, Robin Gong wrote:
> Code reorg for transfer prepare and bus width check to make code
> cleaner.
This should have been 1st patch :(

-- 
~Vinod



* Re: [PATCH v5 1/3] dma: imx-sdma: add support for sdma memory copy
  2014-12-05 16:39   ` Vinod Koul
@ 2015-01-13  3:55     ` Robin Gong
  0 siblings, 0 replies; 9+ messages in thread
From: Robin Gong @ 2015-01-13  3:55 UTC (permalink / raw)
  To: Vinod Koul; +Cc: dan.j.williams, andriy.shevchenko, dmaengine, linux-kernel

On Fri, Dec 05, 2014 at 10:09:18PM +0530, Vinod Koul wrote:
> On Thu, Oct 23, 2014 at 10:22:18AM +0800, Robin Gong wrote:
>  
> > -static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> > -		struct dma_chan *chan, struct scatterlist *sgl,
> > -		unsigned int sg_len, enum dma_transfer_direction direction,
> > -		unsigned long flags, void *context)
> > +static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> > +		struct dma_chan *chan, dma_addr_t dma_dst,
> > +		dma_addr_t dma_src, size_t len, unsigned long flags)
> > +{
> > +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> > +	struct sdma_engine *sdma = sdmac->sdma;
> > +	int channel = sdmac->channel;
> > +	size_t count;
> > +	int i = 0, param, ret;
> > +	struct sdma_buffer_descriptor *bd;
> > +
> > +	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
> > +		return NULL;
> Why is this dependent on status? You can prepare a descriptor here!
Unfortunately, that is a limitation of the imx-sdma driver: it cannot process
descriptors asynchronously, so a new one cannot be prepared while a transfer
is still in progress.
> > +
> > +	if (len >= NUM_BD * SDMA_BD_MAX_CNT) {
> > +		dev_err(sdma->dev, "channel%d: maximum bytes exceeded:%d > %d\n",
> > +			channel, len, NUM_BD * SDMA_BD_MAX_CNT);
> > +		goto err_out;
> > +	}
> > +
> > +	sdmac->status = DMA_IN_PROGRESS;
> ??
> 
> > +
> > +	sdmac->buf_tail = 0;
> > +
> > +	dev_dbg(sdma->dev, "memcpy: %x->%x, len=%d, channel=%d.\n",
> > +		dma_src, dma_dst, len, channel);
> > +
> > +	sdmac->direction = DMA_MEM_TO_MEM;
> > +
> > +	ret = sdma_load_context(sdmac);
> > +	if (ret)
> > +		goto err_out;
> > +
> > +	sdmac->chn_count = 0;
> > +
> > +	do {
> > +		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
> > +		bd = &sdmac->bd[i];
> > +		bd->buffer_addr = dma_src;
> > +		bd->ext_buffer_addr = dma_dst;
> > +		bd->mode.count = count;
> > +
> > +		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
> > +			ret =  -EINVAL;
> > +			goto err_out;
> > +		}
> > +
> > +		switch (sdmac->word_size) {
> > +		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> So are you dependent on dma_slave_config being set? Then it is wrong; for
> memcpy you shouldn't be!
> 
Yes. Will remove the constraint.
> >  		switch (sdmac->word_size) {
> >  		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> >  			bd->mode.command = 0;
> > -			if (count & 3 || sg->dma_address & 3)
> > +			if ((count | sg_src->dma_address | (sg_dst &&
> > +				(sg_dst->dma_address))) & 3)
> >  				return NULL;
> >  			break;
> >  		case DMA_SLAVE_BUSWIDTH_2_BYTES:
> >  			bd->mode.command = 2;
> > -			if (count & 1 || sg->dma_address & 1)
> > +			if ((count | sg_src->dma_address |
> > +				(sg_dst && (sg_dst->dma_address))) & 1)
> >  				return NULL;
> This doesn't seem to have anything to do with memcpy; shouldn't this be an
> independent change?
> 
Yes, will split the re-org change out into its own patch.
> >  			break;
> >  		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > @@ -1099,21 +1214,23 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> >  
> >  		param = BD_DONE | BD_EXTD | BD_CONT;
> >  
> > -		if (i + 1 == sg_len) {
> > +		if (i + 1 == src_nents) {
> >  			param |= BD_INTR;
> >  			param |= BD_LAST;
> >  			param &= ~BD_CONT;
> >  		}
> >  
> > -		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
> > -				i, count, (u64)sg->dma_address,
> > +		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
> > +				i, count, sg_src->dma_address,
> >  				param & BD_WRAP ? "wrap" : "",
> >  				param & BD_INTR ? " intr" : "");
> ditto
> 
> >  
> >  		bd->mode.status = param;
> > +		if (direction == DMA_MEM_TO_MEM)
> > +			sg_dst = sg_next(sg_dst);
> >  	}
> >  
> > -	sdmac->num_bd = sg_len;
> > +	sdmac->num_bd = src_nents;
> >  	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
> >  
> >  	return &sdmac->desc;
> > @@ -1122,6 +1239,24 @@ err_out:
> >  	return NULL;
> >  }
> >  
> > +static struct dma_async_tx_descriptor *sdma_prep_memcpy_sg(
> > +		struct dma_chan *chan,
> > +		struct scatterlist *dst_sg, unsigned int dst_nents,
> > +		struct scatterlist *src_sg, unsigned int src_nents,
> > +		unsigned long flags)
> > +{
> > +	return sdma_prep_sg(chan, dst_sg, dst_nents, src_sg, src_nents,
> > +			   DMA_MEM_TO_MEM);
> > +}
> > +
> > +static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
> > +		struct dma_chan *chan, struct scatterlist *sgl,
> > +		unsigned int sg_len, enum dma_transfer_direction direction,
> > +		unsigned long flags, void *context)
> > +{
> > +	return sdma_prep_sg(chan, NULL, 0, sgl, sg_len, direction);
> > +}
> you should have done this first and then added memcpy
> 
> -- 
> ~Vinod 
OK, will do the re-org patch first, and then add the new feature.



* Re: [PATCH v5 3/3] dma: imx-sdma: reorg code to make code clean
  2014-12-05 16:41   ` Vinod Koul
@ 2015-01-13  3:55     ` Robin Gong
  0 siblings, 0 replies; 9+ messages in thread
From: Robin Gong @ 2015-01-13  3:55 UTC (permalink / raw)
  To: Vinod Koul; +Cc: dan.j.williams, andriy.shevchenko, dmaengine, linux-kernel

On Fri, Dec 05, 2014 at 10:11:44PM +0530, Vinod Koul wrote:
> On Thu, Oct 23, 2014 at 10:22:20AM +0800, Robin Gong wrote:
> > Code reorg for transfer prepare and bus width check to make code
> > cleaner.
> This should have been 1st patch :(
> 
> -- 
> ~Vinod
>
Got it. Thanks.


