* [PATCH v2 0/4] dmaengine: vdma: AXI DMA enhancements
@ 2016-05-18  7:47 Kedareswara rao Appana
  2016-05-18  7:47 ` [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode Kedareswara rao Appana
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Kedareswara rao Appana @ 2016-05-18  7:47 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, punnaia
  Cc: dmaengine, linux-arm-kernel, linux-kernel

This patch series does the following:
---> Add support for cyclic DMA mode for the AXI DMA IP.
---> Use dma_pool_zalloc instead of dma_pool_alloc.
---> Add 64-bit addressing support for the AXI DMA IP.
---> Add 64-bit addressing support for the AXI CDMA IP.

Kedareswara rao Appana (4):
  dmaengine: vdma: Add support for cyclic dma mode
  dmaengine: vdma: Use dma_pool_zalloc
  dmaengine: vdma: Add 64 bit addressing support for the axi dma
  dmaengine: vdma: Add 64 bit addressing support for the axi cdma

 drivers/dma/xilinx/xilinx_vdma.c | 293 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 260 insertions(+), 33 deletions(-)

-- 
2.1.2


* [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode
  2016-05-18  7:47 [PATCH v2 0/4] dmaengine: vdma: AXI DMA enhancements Kedareswara rao Appana
@ 2016-05-18  7:47 ` Kedareswara rao Appana
  2016-06-07  6:18   ` Vinod Koul
  2016-05-18  7:47 ` [PATCH v2 2/4] dmaengine: vdma: Use dma_pool_zalloc Kedareswara rao Appana
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Kedareswara rao Appana @ 2016-05-18  7:47 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, punnaia
  Cc: dmaengine, linux-arm-kernel, linux-kernel

This patch adds support for the AXI DMA's cyclic DMA mode.
In cyclic mode, the DMA engine fetches and processes the same
BDs without interruption, and it continues to fetch and process
until it is stopped or reset.
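
For context, a minimal consumer-side sketch of driving this mode through
the dmaengine API; chan, buf, buf_len, period_len and period_done_cb are
placeholder names, not part of the patch:

	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback = period_done_cb;	/* runs as each period completes */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* a cyclic transfer keeps running until explicitly terminated */
	dmaengine_terminate_all(chan);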

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
---> Removed unnecessary memset() call.

 drivers/dma/xilinx/xilinx_vdma.c | 178 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 173 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index df91185..3edd454 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -157,6 +157,7 @@
 /* AXI DMA Specific Masks/Bit fields */
 #define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
@@ -278,11 +279,13 @@ struct xilinx_cdma_tx_segment {
  * @async_tx: Async transaction descriptor
  * @segments: TX segments list
  * @node: Node in the channel descriptors list
+ * @cyclic: Flag for cyclic transfers
  */
 struct xilinx_dma_tx_descriptor {
 	struct dma_async_tx_descriptor async_tx;
 	struct list_head segments;
 	struct list_head node;
+	bool cyclic;
 };
 
 /**
@@ -302,6 +305,7 @@ struct xilinx_dma_tx_descriptor {
  * @direction: Transfer direction
  * @num_frms: Number of frames
  * @has_sg: Support scatter transfers
+ * @cyclic: Flag for cyclic transfers
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @tasklet: Cleanup work after irq
@@ -312,6 +316,7 @@ struct xilinx_dma_tx_descriptor {
  * @desc_submitcount: Descriptor h/w submitted count
  * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
  * @start_transfer: Differentiate b/w DMA IP's transfer
  */
 struct xilinx_dma_chan {
@@ -330,6 +335,7 @@ struct xilinx_dma_chan {
 	enum dma_transfer_direction direction;
 	int num_frms;
 	bool has_sg;
+	bool cyclic;
 	bool genlock;
 	bool err;
 	struct tasklet_struct tasklet;
@@ -340,6 +346,7 @@ struct xilinx_dma_chan {
 	u32 desc_submitcount;
 	u32 residue;
 	struct xilinx_axidma_tx_segment *seg_v;
+	struct xilinx_axidma_tx_segment *cyclic_seg_v;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 };
 
@@ -660,13 +667,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 
 	xilinx_dma_free_descriptors(chan);
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
 		xilinx_dma_free_tx_segment(chan, chan->seg_v);
+	}
 	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
 }
 
 /**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+					  struct xilinx_dma_tx_descriptor *desc,
+					  unsigned long *flags)
+{
+	dma_async_tx_callback callback;
+	void *callback_param;
+
+	callback = desc->async_tx.callback;
+	callback_param = desc->async_tx.callback_param;
+	if (callback) {
+		spin_unlock_irqrestore(&chan->lock, *flags);
+		callback(callback_param);
+		spin_lock_irqsave(&chan->lock, *flags);
+	}
+}
+
+/**
  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
  * @chan: Driver specific DMA channel
  */
@@ -681,6 +712,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 		dma_async_tx_callback callback;
 		void *callback_param;
 
+		if (desc->cyclic) {
+			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+			break;
+		}
+
 		/* Remove from the list of running transactions */
 		list_del(&desc->node);
 
@@ -757,7 +793,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		return -ENOMEM;
 	}
 
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/*
 		 * For AXI DMA case after submitting a pending_list, keep
 		 * an extra segment allocated so that the "next descriptor"
@@ -768,6 +804,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		 */
 		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
 
+		/*
+		 * For cyclic DMA mode we need to program the tail descriptor
+		 * register with a value that is not part of the BD chain,
+		 * so allocate a dedicated segment at channel-allocation time
+		 * just for programming the tail descriptor.
+		 */
+		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+	}
+
 	dma_cookie_init(dchan);
 
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
@@ -1157,8 +1202,12 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
+		if (chan->cyclic)
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       chan->cyclic_seg_v->phys);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       tail_segment->phys);
 	} else {
 		struct xilinx_axidma_tx_segment *segment;
 		struct xilinx_axidma_desc_hw *hw;
@@ -1209,7 +1258,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
 		list_del(&desc->node);
-		dma_cookie_complete(&desc->async_tx);
+		if (!desc->cyclic)
+			dma_cookie_complete(&desc->async_tx);
 		list_add_tail(&desc->node, &chan->done_list);
 	}
 }
@@ -1397,6 +1447,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	int err;
 
+	if (chan->cyclic) {
+		xilinx_dma_free_tx_descriptor(chan, desc);
+		return -EBUSY;
+	}
+
 	if (chan->err) {
 		/*
 		 * If reset fails, need to hard reset the system.
@@ -1414,6 +1469,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* Put this transaction onto the tail of the pending queue */
 	append_desc_queue(chan, desc);
 
+	if (desc->cyclic)
+		chan->cyclic = true;
+
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return cookie;
@@ -1669,12 +1727,112 @@ error:
 }
 
 /**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: Length of an individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+	size_t copy, sg_used;
+	unsigned int num_periods;
+	int i;
+	u32 reg;
+
+	num_periods = buf_len / period_len;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	/* Allocate a transaction descriptor. */
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	chan->direction = direction;
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	for (i = 0; i < num_periods; ++i) {
+		sg_used = 0;
+
+		while (sg_used < period_len) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = min_t(size_t, period_len - sg_used,
+				     XILINX_DMA_MAX_TRANS_LEN);
+			hw = &segment->hw;
+			hw->buf_addr = buf_addr + sg_used + (period_len * i);
+			hw->control = copy;
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	head_segment = list_first_entry(&desc->segments,
+				   struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = head_segment->phys;
+
+	desc->cyclic = true;
+	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+	/* For MM2S, set SOP on the first BD and EOP on the last BD */
+	if (direction == DMA_MEM_TO_DEV) {
+		head_segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+		segment->hw.next_desc = (u32) head_segment->phys;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific DMA Channel pointer
  */
 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	u32 reg;
+
+	if (chan->cyclic)
+		xilinx_dma_chan_reset(chan);
 
 	/* Halt the DMA engine */
 	xilinx_dma_halt(chan);
@@ -1682,6 +1840,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 	/* Remove and free all of the descriptors in the lists */
 	xilinx_dma_free_descriptors(chan);
 
+	if (chan->cyclic) {
+		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+		chan->cyclic = false;
+	}
+
 	return 0;
 }
 
@@ -2218,7 +2383,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.device_tx_status = xilinx_dma_tx_status;
 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+		xdev->common.device_prep_dma_cyclic =
+					  xilinx_dma_prep_dma_cyclic;
 		/* Residue calculation is supported by only AXI DMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
-- 
2.1.2


* [PATCH v2 2/4] dmaengine: vdma: Use dma_pool_zalloc
  2016-05-18  7:47 [PATCH v2 0/4] dmaengine: vdma: AXI DMA enhancements Kedareswara rao Appana
  2016-05-18  7:47 ` [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode Kedareswara rao Appana
@ 2016-05-18  7:47 ` Kedareswara rao Appana
  2016-06-07  6:19   ` Vinod Koul
  2016-05-18  7:47 ` [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma Kedareswara rao Appana
  2016-05-18  7:47 ` [PATCH v2 4/4] dmaengine: vdma: Add 64 bit addressing support for the axi cdma Kedareswara rao Appana
  3 siblings, 1 reply; 9+ messages in thread
From: Kedareswara rao Appana @ 2016-05-18  7:47 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, punnaia
  Cc: dmaengine, linux-arm-kernel, linux-kernel

dma_pool_zalloc() combines dma_pool_alloc() with a zeroing memset();
this patch updates the driver to use dma_pool_zalloc().
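
In other words (a sketch; pool, phys and size are placeholder names, and
dma_pool_zalloc() zeroes the pool's full block size):

	/* open-coded zeroing allocation */
	buf = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
	if (buf)
		memset(buf, 0, size);

	/* equivalent single call */
	buf = dma_pool_zalloc(pool, GFP_ATOMIC, &phys);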

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
--> New patch.

 drivers/dma/xilinx/xilinx_vdma.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 3edd454..0f5b38a 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -498,11 +498,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 	struct xilinx_cdma_tx_segment *segment;
 	dma_addr_t phys;
 
-	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 	if (!segment)
 		return NULL;
 
-	memset(segment, 0, sizeof(*segment));
 	segment->phys = phys;
 
 	return segment;
@@ -520,11 +519,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 	struct xilinx_axidma_tx_segment *segment;
 	dma_addr_t phys;
 
-	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 	if (!segment)
 		return NULL;
 
-	memset(segment, 0, sizeof(*segment));
 	segment->phys = phys;
 
 	return segment;
-- 
2.1.2


* [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma
  2016-05-18  7:47 [PATCH v2 0/4] dmaengine: vdma: AXI DMA enhancements Kedareswara rao Appana
  2016-05-18  7:47 ` [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode Kedareswara rao Appana
  2016-05-18  7:47 ` [PATCH v2 2/4] dmaengine: vdma: Use dma_pool_zalloc Kedareswara rao Appana
@ 2016-05-18  7:47 ` Kedareswara rao Appana
  2016-06-07  6:17   ` Vinod Koul
  2016-05-18  7:47 ` [PATCH v2 4/4] dmaengine: vdma: Add 64 bit addressing support for the axi cdma Kedareswara rao Appana
  3 siblings, 1 reply; 9+ messages in thread
From: Kedareswara rao Appana @ 2016-05-18  7:47 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, punnaia
  Cc: dmaengine, linux-arm-kernel, linux-kernel

The AXI DMA is a soft IP that can be programmed to support
32-bit addressing or greater than 32-bit addressing.

When the AXI DMA IP is configured for a 32-bit address space
in simple DMA mode, the buffer address is specified by a single register
(18h for the MM2S channel and 48h for the S2MM channel). When configured
in SG mode, the current descriptor and tail descriptor are each specified
by a single register (08h for curdesc and 10h for taildesc on the MM2S
channel; 38h for curdesc and 40h for taildesc on the S2MM channel).

When the AXI DMA core is configured for an address space greater
than 32 bits, each buffer address or descriptor address is specified by
a combination of two registers.

The first register specifies the LSB 32 bits of the address,
while the next register specifies the MSB 32 bits.

For example, 48h specifies the LSB 32 bits while 4Ch specifies
the MSB 32 bits of the first start address.
So we need to program two registers at a time.
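
The driver does this through a small dma_writeq() helper built on
lo_hi_writeq() (see the diff below); as a sketch, the helper effectively
expands to:

	static inline void dma_writeq(struct xilinx_dma_chan *chan,
				      u32 reg, u64 value)
	{
		/* lo_hi_writeq(): LSB word first, then the MSB word */
		writel(lower_32_bits(value),
		       chan->xdev->regs + chan->ctrl_offset + reg);
		writel(upper_32_bits(value),
		       chan->xdev->regs + chan->ctrl_offset + reg + 4);
	}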

This patch adds 64-bit addressing support for the AXI DMA
IP to the driver.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
--> New patch.

 drivers/dma/xilinx/xilinx_vdma.c | 79 ++++++++++++++++++++++++++++++----------
 1 file changed, 60 insertions(+), 19 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0f5b38a..9c15451 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -45,6 +45,7 @@
 #include <linux/of_irq.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "../dmaengine.h"
 
@@ -195,22 +196,22 @@ struct xilinx_vdma_desc_hw {
 /**
  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
  * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
  * @buf_addr: Buffer address @0x08
- * @pad2: Reserved @0x0C
- * @pad3: Reserved @0x10
- * @pad4: Reserved @0x14
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @pad1: Reserved @0x10
+ * @pad2: Reserved @0x14
  * @control: Control field @0x18
  * @status: Status field @0x1C
  * @app: APP Fields @0x20 - 0x30
  */
 struct xilinx_axidma_desc_hw {
 	u32 next_desc;
-	u32 pad1;
+	u32 next_desc_msb;
 	u32 buf_addr;
+	u32 buf_addr_msb;
+	u32 pad1;
 	u32 pad2;
-	u32 pad3;
-	u32 pad4;
 	u32 control;
 	u32 status;
 	u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -461,6 +462,11 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
 	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
 }
 
+static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
+{
+	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
+}
+
 /* -----------------------------------------------------------------------------
  * Descriptors and segments alloc and free
  */
@@ -1189,9 +1195,14 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 	}
 
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       head_desc->async_tx.phys);
+	if (chan->has_sg) {
+		if (chan->ext_addr)
+			dma_writeq(chan, XILINX_DMA_REG_CURDESC,
+				   head_desc->async_tx.phys);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+				       head_desc->async_tx.phys);
+	}
 
 	xilinx_dma_start(chan);
 
@@ -1200,12 +1211,21 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		if (chan->cyclic)
-			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				       chan->cyclic_seg_v->phys);
-		else
-			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				       tail_segment->phys);
+		if (chan->cyclic) {
+			if (chan->ext_addr)
+				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
+					   chan->cyclic_seg_v->phys);
+			else
+				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+					       chan->cyclic_seg_v->phys);
+		} else {
+			if (chan->ext_addr)
+				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
+					   tail_segment->phys);
+			else
+				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+					       tail_segment->phys);
+		}
 	} else {
 		struct xilinx_axidma_tx_segment *segment;
 		struct xilinx_axidma_desc_hw *hw;
@@ -1215,7 +1235,12 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 					   node);
 		hw = &segment->hw;
 
-		dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+		if (chan->ext_addr)
+			dma_writeq(chan, XILINX_DMA_REG_SRCDSTADDR,
+				   hw->buf_addr);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+				       hw->buf_addr);
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1679,7 +1704,15 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
-			hw->buf_addr = sg_dma_address(sg) + sg_used;
+			if (chan->ext_addr) {
+				hw->buf_addr = lower_32_bits(sg_dma_address(sg)
+					       + sg_used);
+				hw->buf_addr_msb =
+						upper_32_bits(sg_dma_address(sg)
+						+ sg_used);
+			} else {
+				hw->buf_addr = sg_dma_address(sg) + sg_used;
+			}
 
 			hw->control = copy;
 
@@ -1777,7 +1810,15 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			copy = min_t(size_t, period_len - sg_used,
 				     XILINX_DMA_MAX_TRANS_LEN);
 			hw = &segment->hw;
-			hw->buf_addr = buf_addr + sg_used + (period_len * i);
+			if (chan->ext_addr) {
+				hw->buf_addr = lower_32_bits(buf_addr +
+						sg_used + (period_len * i));
+				hw->buf_addr_msb = upper_32_bits(buf_addr +
+						sg_used + (period_len * i));
+			} else {
+				hw->buf_addr = buf_addr + sg_used +
+					       (period_len * i);
+			}
 			hw->control = copy;
 
 			if (prev)
-- 
2.1.2


* [PATCH v2 4/4] dmaengine: vdma: Add 64 bit addressing support for the axi cdma
  2016-05-18  7:47 [PATCH v2 0/4] dmaengine: vdma: AXI DMA enhancements Kedareswara rao Appana
                   ` (2 preceding siblings ...)
  2016-05-18  7:47 ` [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma Kedareswara rao Appana
@ 2016-05-18  7:47 ` Kedareswara rao Appana
  3 siblings, 0 replies; 9+ messages in thread
From: Kedareswara rao Appana @ 2016-05-18  7:47 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, punnaia
  Cc: dmaengine, linux-arm-kernel, linux-kernel

The AXI CDMA is a soft IP that can be programmed to support
32-bit addressing or greater than 32-bit addressing.

When the AXI CDMA IP is configured for a 32-bit address space
in simple DMA mode, the source/destination buffer address is
specified by a single register (18h for the source buffer address and
20h for the destination buffer address). When configured in SG mode,
the current descriptor and tail descriptor are each specified by a
single register (08h for curdesc and 10h for taildesc).

When the AXI CDMA core is configured for an address space greater
than 32 bits, each buffer address or descriptor address is specified by
a combination of two registers.

The first register specifies the LSB 32 bits of the address,
while the next register specifies the MSB 32 bits.

For example, 08h specifies the LSB 32 bits while 0Ch specifies
the MSB 32 bits of the first start address.
So we need to program two registers at a time.
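
For context, a minimal consumer-side sketch of exercising the CDMA through
the dmaengine API; chan, dst, src, len and done_cb are placeholder names:

	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;
	txd->callback = done_cb;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

With ext_addr set, the driver below transparently splits dst and src into
their LSB/MSB register halves.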

This patch adds 64-bit addressing support for the AXI CDMA
IP to the driver.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
--> New patch.

 drivers/dma/xilinx/xilinx_vdma.c | 44 +++++++++++++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 9c15451..18360ff 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -220,21 +220,21 @@ struct xilinx_axidma_desc_hw {
 /**
  * struct xilinx_cdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
  * @src_addr: Source address @0x08
- * @pad2: Reserved @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
  * @dest_addr: Destination address @0x10
- * @pad3: Reserved @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
  * @control: Control field @0x18
  * @status: Status field @0x1C
  */
 struct xilinx_cdma_desc_hw {
 	u32 next_desc;
-	u32 pad1;
+	u32 next_desc_msb;
 	u32 src_addr;
-	u32 pad2;
+	u32 src_addr_msb;
 	u32 dest_addr;
-	u32 pad3;
+	u32 dest_addr_msb;
 	u32 control;
 	u32 status;
 } __aligned(64);
@@ -1114,12 +1114,20 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 	}
 
 	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			   head_desc->async_tx.phys);
+		if (chan->ext_addr)
+			dma_writeq(chan, XILINX_DMA_REG_CURDESC,
+				   head_desc->async_tx.phys);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+				       head_desc->async_tx.phys);
 
 		/* Update tail ptr register which will start the transfer */
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
+		if (chan->ext_addr)
+			dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
+				   tail_segment->phys);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       tail_segment->phys);
 	} else {
 		/* In simple mode */
 		struct xilinx_cdma_tx_segment *segment;
@@ -1131,8 +1139,16 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		hw = &segment->hw;
 
-		dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
-		dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+		if (chan->ext_addr) {
+			dma_writeq(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+			dma_writeq(chan, XILINX_CDMA_REG_DSTADDR,
+				   hw->dest_addr);
+		} else {
+			dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR,
+				       hw->src_addr);
+			dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR,
+				       hw->dest_addr);
+		}
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1622,6 +1638,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	hw->control = len;
 	hw->src_addr = dma_src;
 	hw->dest_addr = dma_dst;
+	if (chan->ext_addr) {
+		hw->src_addr_msb = upper_32_bits(dma_src);
+		hw->dest_addr_msb = upper_32_bits(dma_dst);
+	}
 
 	/* Fill the previous next descriptor with current */
 	prev = list_last_entry(&desc->segments,
-- 
2.1.2


* RE: [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma
  2016-06-07  6:17   ` Vinod Koul
@ 2016-06-07  6:13     ` Appana Durga Kedareswara Rao
  0 siblings, 0 replies; 9+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-06-07  6:13 UTC (permalink / raw)
  To: Vinod Koul
  Cc: dan.j.williams, Michal Simek, Soren Brinkmann, moritz.fischer,
	laurent.pinchart, luis, Punnaiah Choudary Kalluri, dmaengine,
	linux-arm-kernel, linux-kernel

Hi Vinod,

> On Wed, May 18, 2016 at 01:17:32PM +0530, Kedareswara rao Appana wrote:
> > +		if (chan->cyclic) {
> > +			if (chan->ext_addr)
> > +				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
> > +					   chan->cyclic_seg_v->phys);
> > +			else
> > +				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
> > +					       chan->cyclic_seg_v->phys);
> > +		} else {
> > +			if (chan->ext_addr)
> > +				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
> > +					   tail_segment->phys);
> > +			else
> > +				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
> > +					       tail_segment->phys);
> 
> This looks ugly and is repeated a few times. Why not have an xilinx_write()
> helper which does either dma_writeq() or dma_ctrl_write() based on the
> channel?

OK, sure, will fix in the next version...

> 
> > +			if (chan->ext_addr) {
> > +				hw->buf_addr = lower_32_bits(buf_addr +
> > +						sg_used + (period_len * i));
> > +				hw->buf_addr_msb = upper_32_bits(buf_addr +
> > +						sg_used + (period_len * i));
> > +			} else {
> > +				hw->buf_addr = buf_addr + sg_used +
> > +					       (period_len * i);
> > +			}
> 
> A similar wrapper here would make the code more readable

OK, sure, will fix in the next version...

Thanks,
Kedar.


* Re: [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma
  2016-05-18  7:47 ` [PATCH v2 3/4] dmaengine: vdma: Add 64 bit addressing support for the axi dma Kedareswara rao Appana
@ 2016-06-07  6:17   ` Vinod Koul
  2016-06-07  6:13     ` Appana Durga Kedareswara Rao
  0 siblings, 1 reply; 9+ messages in thread
From: Vinod Koul @ 2016-06-07  6:17 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
	moritz.fischer, laurent.pinchart, luis, punnaia, dmaengine,
	linux-arm-kernel, linux-kernel

On Wed, May 18, 2016 at 01:17:32PM +0530, Kedareswara rao Appana wrote:
> +		if (chan->cyclic) {
> +			if (chan->ext_addr)
> +				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
> +					   chan->cyclic_seg_v->phys);
> +			else
> +				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
> +					       chan->cyclic_seg_v->phys);
> +		} else {
> +			if (chan->ext_addr)
> +				dma_writeq(chan, XILINX_DMA_REG_TAILDESC,
> +					   tail_segment->phys);
> +			else
> +				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
> +					       tail_segment->phys);

This looks ugly and is repeated a few times. Why not have an xilinx_write()
helper which does either dma_writeq() or dma_ctrl_write() based on the
channel?
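
One possible shape for such a helper (a sketch only; naming and placement
are left to the driver author):

	static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
					dma_addr_t value)
	{
		if (chan->ext_addr)
			dma_writeq(chan, reg, value);
		else
			dma_ctrl_write(chan, reg, value);
	}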

> +			if (chan->ext_addr) {
> +				hw->buf_addr = lower_32_bits(buf_addr +
> +						sg_used + (period_len * i));
> +				hw->buf_addr_msb = upper_32_bits(buf_addr +
> +						sg_used + (period_len * i));
> +			} else {
> +				hw->buf_addr = buf_addr + sg_used +
> +					       (period_len * i);
> +			}

A similar wrapper here would make the code more readable
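
Along the same lines, a sketch of a buffer-address wrapper (names are
illustrative; the caller would pass period_len * i as the last argument):

	static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
					     struct xilinx_axidma_desc_hw *hw,
					     dma_addr_t buf_addr, size_t sg_used,
					     size_t period_len)
	{
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(buf_addr + sg_used +
						     period_len);
			hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
							 period_len);
		} else {
			hw->buf_addr = buf_addr + sg_used + period_len;
		}
	}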

-- 
~Vinod


* Re: [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode
  2016-05-18  7:47 ` [PATCH v2 1/4] dmaengine: vdma: Add support for cyclic dma mode Kedareswara rao Appana
@ 2016-06-07  6:18   ` Vinod Koul
  0 siblings, 0 replies; 9+ messages in thread
From: Vinod Koul @ 2016-06-07  6:18 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
	moritz.fischer, laurent.pinchart, luis, punnaia, dmaengine,
	linux-arm-kernel, linux-kernel

On Wed, May 18, 2016 at 01:17:30PM +0530, Kedareswara rao Appana wrote:
> This patch adds support for the AXI DMA's cyclic DMA mode.
> In cyclic mode, the DMA engine fetches and processes the same
> BDs without interruption, and it continues to fetch and process
> until it is stopped or reset.

Applied, thanks

-- 
~Vinod


* Re: [PATCH v2 2/4] dmaengine: vdma: Use dma_pool_zalloc
  2016-05-18  7:47 ` [PATCH v2 2/4] dmaengine: vdma: Use dma_pool_zalloc Kedareswara rao Appana
@ 2016-06-07  6:19   ` Vinod Koul
  0 siblings, 0 replies; 9+ messages in thread
From: Vinod Koul @ 2016-06-07  6:19 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
	moritz.fischer, laurent.pinchart, luis, punnaia, dmaengine,
	linux-arm-kernel, linux-kernel

On Wed, May 18, 2016 at 01:17:31PM +0530, Kedareswara rao Appana wrote:
> dma_pool_zalloc() combines dma_pool_alloc() with a zeroing memset();
> this patch updates the driver to use dma_pool_zalloc().

Applied, thanks

-- 
~Vinod

