* [PATCH v4 0/3] dmaengine: xilinx_dma: Bug fixes
@ 2017-01-04 13:35 Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 1/3] dmaengine: xilinx_dma: Check for channel idle state before submitting dma descriptor Kedareswara rao Appana
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Kedareswara rao Appana @ 2017-01-04 13:35 UTC (permalink / raw)
  To: robh+dt, mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	soren.brinkmann, appanad, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu
  Cc: dmaengine, linux-arm-kernel, linux-kernel, devicetree

This patch series fixes the below bugs in the DMA and VDMA IPs:
---> Do not start VDMA until the frame buffer is processed by the h/w.
---> Fix a bug in multi frame stores handling in VDMA.
---> Fix issues w.r.t. multi frame descriptor submission on the
     AXI DMA S2MM (recv) side.

Kedareswara rao Appana (3):
  dmaengine: xilinx_dma: Check for channel idle state before submitting
    dma descriptor
  dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in
    vdma
  dmaengine: xilinx_dma: Fix race condition in the driver for multiple
    descriptor scenario

 .../devicetree/bindings/dma/xilinx/xilinx_dma.txt  |   2 +
 drivers/dma/xilinx/xilinx_dma.c                    | 266 ++++++++++++---------
 2 files changed, 157 insertions(+), 111 deletions(-)

-- 
2.1.2


* [PATCH v4 1/3] dmaengine: xilinx_dma: Check for channel idle state before submitting dma descriptor
  2017-01-04 13:35 [PATCH v4 0/3] dmaengine: xilinx_dma: Bug fixes Kedareswara rao Appana
@ 2017-01-04 13:35 ` Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 3/3] dmaengine: xilinx_dma: Fix race condition in the driver for multiple descriptor scenario Kedareswara rao Appana
  2 siblings, 0 replies; 8+ messages in thread
From: Kedareswara rao Appana @ 2017-01-04 13:35 UTC (permalink / raw)
  To: robh+dt, mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	soren.brinkmann, appanad, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu
  Cc: dmaengine, linux-arm-kernel, linux-kernel, devicetree

Add a channel idle state to ensure that a DMA descriptor is not
submitted while the VDMA engine is still busy with a previous one.
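
In essence, the patch gates descriptor submission on a per-channel idle
flag that is set on halt/reset/completion and cleared when a transfer
is kicked off. A minimal standalone sketch of that pattern is shown
below; the sketch_* names and the trimmed-down channel type are
illustrative assumptions, not the driver's real xilinx_dma_chan or its
locking:

#include <stdbool.h>

struct sketch_chan {
        bool err;       /* channel hit an error */
        bool idle;      /* no transfer currently in flight on the engine */
        /* pending/active descriptor lists elided */
};

static void sketch_start_transfer(struct sketch_chan *chan)
{
        if (chan->err)
                return;

        /* Do not program new descriptors while the engine is busy. */
        if (!chan->idle)
                return;

        /*
         * ... move a pending descriptor to the active list and
         * program it into the hardware ...
         */

        chan->idle = false;
}

static void sketch_complete_irq(struct sketch_chan *chan)
{
        /*
         * Completion interrupt: the engine has finished the previous
         * transfer, so it is safe to submit the next descriptor.
         */
        chan->idle = true;
        sketch_start_transfer(chan);
}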

Reviewed-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> None.
Changes for v2:
---> Added an idle check in the reset path, as suggested by Jose Abreu.
---> Removed the xilinx_dma_is_running/xilinx_dma_is_idle checks
     from the driver and used common idle checks across the driver,
     as suggested by Laurent Pinchart.

 drivers/dma/xilinx/xilinx_dma.c | 56 +++++++++++++----------------------------
 1 file changed, 17 insertions(+), 39 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8288fe4..be7eb41 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -321,6 +321,7 @@ struct xilinx_dma_tx_descriptor {
  * @cyclic: Check for cyclic transfers.
  * @genlock: Support genlock mode
  * @err: Channel has errors
+ * @idle: Check for channel idle
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -351,6 +352,7 @@ struct xilinx_dma_chan {
 	bool cyclic;
 	bool genlock;
 	bool err;
+	bool idle;
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
@@ -920,32 +922,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 }
 
 /**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
-	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-		 XILINX_DMA_DMASR_HALTED) &&
-		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
-		 XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
-	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-		XILINX_DMA_DMASR_IDLE;
-}
-
-/**
  * xilinx_dma_halt - Halt DMA channel
  * @chan: Driver specific DMA channel
  */
@@ -966,6 +942,7 @@ static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
 		chan->err = true;
 	}
+	chan->idle = true;
 }
 
 /**
@@ -1007,6 +984,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->err)
 		return;
 
+	if (!chan->idle)
+		return;
+
 	if (list_empty(&chan->pending_list))
 		return;
 
@@ -1018,13 +998,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/* If it is SG mode and hardware is busy, cannot submit */
-	if (chan->has_sg && xilinx_dma_is_running(chan) &&
-	    !xilinx_dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
-		return;
-	}
-
 	/*
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
@@ -1110,6 +1083,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
 	}
 
+	chan->idle = false;
 	if (!chan->has_sg) {
 		list_del(&desc->node);
 		list_add_tail(&desc->node, &chan->active_list);
@@ -1136,6 +1110,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->err)
 		return;
 
+	if (!chan->idle)
+		return;
+
 	if (list_empty(&chan->pending_list))
 		return;
 
@@ -1181,6 +1158,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
+	chan->idle = false;
 }
 
 /**
@@ -1196,15 +1174,11 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->err)
 		return;
 
-	if (list_empty(&chan->pending_list))
+	if (!chan->idle)
 		return;
 
-	/* If it is SG mode and hardware is busy, cannot submit */
-	if (chan->has_sg && xilinx_dma_is_running(chan) &&
-	    !xilinx_dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
+	if (list_empty(&chan->pending_list))
 		return;
-	}
 
 	head_desc = list_first_entry(&chan->pending_list,
 				     struct xilinx_dma_tx_descriptor, node);
@@ -1302,6 +1276,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
+	chan->idle = false;
 }
 
 /**
@@ -1366,6 +1341,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 	}
 
 	chan->err = false;
+	chan->idle = true;
 
 	return err;
 }
@@ -1447,6 +1423,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
 		spin_lock(&chan->lock);
 		xilinx_dma_complete_descriptor(chan);
+		chan->idle = true;
 		chan->start_transfer(chan);
 		spin_unlock(&chan->lock);
 	}
@@ -2327,6 +2304,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
+	chan->idle = true;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
-- 
2.1.2


* [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
  2017-01-04 13:35 [PATCH v4 0/3] dmaengine: xilinx_dma: Bug fixes Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 1/3] dmaengine: xilinx_dma: Check for channel idle state before submitting dma descriptor Kedareswara rao Appana
@ 2017-01-04 13:35 ` Kedareswara rao Appana
  2017-01-04 15:26   ` Rob Herring
  2017-01-04 13:35 ` [PATCH v4 3/3] dmaengine: xilinx_dma: Fix race condition in the driver for multiple descriptor scenario Kedareswara rao Appana
  2 siblings, 1 reply; 8+ messages in thread
From: Kedareswara rao Appana @ 2017-01-04 13:35 UTC (permalink / raw)
  To: robh+dt, mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	soren.brinkmann, appanad, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu
  Cc: dmaengine, linux-arm-kernel, linux-kernel, devicetree

When the VDMA h/w is configured for more than one frame, for example
when the h/w is configured for n frames and the user submits n frames
and triggers the DMA using the issue_pending API, the current driver
flow submits only one frame at a time. All n frames should instead be
submitted at once, since the h/w is configured for n frames.

This patch fixes this issue.
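
Roughly, the fix replaces "program one frame-buffer address per
start_transfer call" with "program every pending frame-buffer address,
then start the transfer". A simplified, self-contained sketch of that
idea follows; the sketch_* names, the fixed frame count and the plain
array standing in for the VDMA start-address registers are assumptions
for illustration, not the driver's actual code:

#include <stddef.h>

#define SKETCH_NUM_FRMS 4   /* frame stores the h/w was built for (assumed) */

/* Stand-in for the XILINX_VDMA_REG_START_ADDRESS(i) register bank. */
static unsigned long sketch_start_addr_reg[SKETCH_NUM_FRMS];

static void sketch_vdma_start(const unsigned long *frame_addrs,
                              size_t num_pending,
                              void (*write_vsize)(unsigned int vsize),
                              unsigned int vsize)
{
        size_t i;

        /*
         * Without a frame-store configuration option in the h/w, the
         * engine expects one valid address per configured frame store;
         * submitting fewer would leave the remaining slots pointing at
         * invalid memory.
         */
        if (num_pending < SKETCH_NUM_FRMS)
                return;

        /* Program all frame-buffer addresses in one go ... */
        for (i = 0; i < SKETCH_NUM_FRMS; i++)
                sketch_start_addr_reg[i] = frame_addrs[i];

        /* ... and only then start the transfer by writing VSIZE. */
        write_vsize(vsize);
}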

Reviewed-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v4:
---> Added a check for the frame store configuration on the transmit
     side as well, as suggested by Jose Abreu.
---> Changed the dev_dbg prints to dev_warn prints, as suggested
     by Jose Abreu.
Changes for v3:
---> Added checks for the frame store configuration. If the frame store
     configuration is not present at the h/w level and the user submits
     fewer frames, the driver now prints relevant debug messages.
Changes for v2:
---> Fixed race conditions in the driver, as suggested by Jose Abreu.
---> Fixed unnecessary if/else checks in vdma_start_transfer,
     as suggested by Laurent Pinchart.

 .../devicetree/bindings/dma/xilinx/xilinx_dma.txt  |  2 +
 drivers/dma/xilinx/xilinx_dma.c                    | 79 +++++++++++++++-------
 2 files changed, 58 insertions(+), 23 deletions(-)

diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index a2b8bfa..1f65e09 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -66,6 +66,8 @@ Optional child node properties:
 Optional child node properties for VDMA:
 - xlnx,genlock-mode: Tells Genlock synchronization is
 	enabled/disabled in hardware.
+- xlnx,fstore-config: Tells Whether Frame Store Configuration is
+	enabled/disabled in hardware.
 Optional child node properties for AXI DMA:
 -dma-channels: Number of dma channels in child node.
 
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index be7eb41..4fb34da 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -322,6 +322,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @has_fstoreconfig: Check for frame store configuration
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -353,6 +354,7 @@ struct xilinx_dma_chan {
 	bool genlock;
 	bool err;
 	bool idle;
+	bool has_fstoreconfig;
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
@@ -990,6 +992,27 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (list_empty(&chan->pending_list))
 		return;
 
+	/*
+	 * Note: When VDMA is built with the default h/w configuration,
+	 * the user should submit as many frames as the h/w is
+	 * configured for. If fewer frames are submitted, the VDMA
+	 * engine tries to write to an invalid location, resulting in
+	 * undefined behaviour or memory corruption.
+	 *
+	 * To submit fewer frames than the h/w is capable of, enable
+	 * debug info 13 on the S2MM side or debug info 6 on the MM2S
+	 * side at the h/w level; that allows the number of frame
+	 * buffers to be modified at runtime.
+	 */
+	if (!chan->has_fstoreconfig &&
+	     chan->desc_pendingcount < chan->num_frms) {
+		dev_warn(chan->dev, "Frame Store Configuration is not enabled at the h/w level\n");
+		dev_warn(chan->dev, "Enable debug info 13 or 6 at the h/w level\n");
+		dev_warn(chan->dev, "or submit as many frames as the h/w is capable of\n");
+
+		return;
+	}
+
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_dma_tx_descriptor, node);
 	tail_desc = list_last_entry(&chan->pending_list,
@@ -1052,25 +1075,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg) {
 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
 				tail_segment->phys);
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
+		int i = 0, j = 0;
 
 		if (chan->desc_submitcount < chan->num_frms)
 			i = chan->desc_submitcount;
 
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
-					XILINX_VDMA_REG_START_ADDRESS(i++),
-					segment->hw.buf_addr);
-
-			last = segment;
+		for (j = 0; j < chan->num_frms; ) {
+			list_for_each_entry(segment, &desc->segments, node) {
+				if (chan->ext_addr)
+					vdma_desc_write_64(chan,
+					  XILINX_VDMA_REG_START_ADDRESS_64(i++),
+					  segment->hw.buf_addr,
+					  segment->hw.buf_addr_msb);
+				else
+					vdma_desc_write(chan,
+					    XILINX_VDMA_REG_START_ADDRESS(i++),
+					    segment->hw.buf_addr);
+
+				last = segment;
+			}
+			list_del(&desc->node);
+			list_add_tail(&desc->node, &chan->active_list);
+			j++;
+			if (list_empty(&chan->pending_list) ||
+			    (i == chan->num_frms))
+				break;
+			desc = list_first_entry(&chan->pending_list,
+						struct xilinx_dma_tx_descriptor,
+						node);
 		}
 
 		if (!last)
@@ -1081,20 +1117,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 				last->hw.stride);
 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-	}
 
-	chan->idle = false;
-	if (!chan->has_sg) {
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
+		chan->desc_submitcount += j;
+		chan->desc_pendingcount -= j;
 		if (chan->desc_submitcount == chan->num_frms)
 			chan->desc_submitcount = 0;
-	} else {
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
 	}
+
+	chan->idle = false;
 }
 
 /**
@@ -1342,6 +1372,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 
 	chan->err = false;
 	chan->idle = true;
+	chan->desc_submitcount = 0;
 
 	return err;
 }
@@ -2315,6 +2346,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
 
 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+	chan->has_fstoreconfig = of_property_read_bool(node,
+						       "xlnx,fstore-config");
 
 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
 	if (err) {
-- 
2.1.2


* [PATCH v4 3/3] dmaengine: xilinx_dma: Fix race condition in the driver for multiple descriptor scenario
  2017-01-04 13:35 [PATCH v4 0/3] dmaengine: xilinx_dma: Bug fixes Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 1/3] dmaengine: xilinx_dma: Check for channel idle state before submitting dma descriptor Kedareswara rao Appana
  2017-01-04 13:35 ` [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma Kedareswara rao Appana
@ 2017-01-04 13:35 ` Kedareswara rao Appana
  2 siblings, 0 replies; 8+ messages in thread
From: Kedareswara rao Appana @ 2017-01-04 13:35 UTC (permalink / raw)
  To: robh+dt, mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	soren.brinkmann, appanad, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu
  Cc: dmaengine, linux-arm-kernel, linux-kernel, devicetree

When the driver is handling the AXI DMA soft IP and the user submits
multiple descriptors back to back on the S2MM (recv) side, with the
current driver flow the last buffer descriptor's next-bd field points
to an invalid location, resulting in invalid data or errors in the
DMA engine.

This patch fixes the issue by creating a BD chain during channel
allocation itself and using those BDs.
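
The core idea is to allocate a fixed ring of buffer descriptors once,
at channel allocation time, with each descriptor's next pointer
pre-linked to the following descriptor, so the tail's next pointer is
always valid. A self-contained sketch of building such a ring is below;
the sketch_bd type and plain calloc() stand in for the driver's
xilinx_axidma_tx_segment array in DMA-coherent memory and are not the
actual implementation:

#include <stdint.h>
#include <stdlib.h>

#define SKETCH_NUM_DESCS 255  /* mirrors XILINX_DMA_NUM_DESCS in the patch */

struct sketch_bd {
        uint32_t next_desc;     /* low 32 bits of the next BD's bus address */
        uint32_t next_desc_msb; /* high 32 bits of the next BD's bus address */
        /* control/status/buffer fields elided */
        uint64_t phys;          /* bus address of this BD */
};

/* Build a circular chain so the tail's "next" always points at a valid BD. */
static struct sketch_bd *sketch_alloc_bd_ring(uint64_t ring_bus_addr)
{
        struct sketch_bd *ring = calloc(SKETCH_NUM_DESCS, sizeof(*ring));
        int i;

        if (!ring)
                return NULL;

        for (i = 0; i < SKETCH_NUM_DESCS; i++) {
                uint64_t next = ring_bus_addr +
                        sizeof(*ring) * ((i + 1) % SKETCH_NUM_DESCS);

                ring[i].phys = ring_bus_addr + sizeof(*ring) * i;
                ring[i].next_desc = (uint32_t)next;
                ring[i].next_desc_msb = (uint32_t)(next >> 32);
        }

        return ring;
}

With such a ring, allocating a segment is just taking an entry off a
free list and freeing one puts it back, which is what the patch does
with the new free_seg_list.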

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v4:
---> None.
Changes for v3:
---> None.
Changes for v2:
---> None.

 drivers/dma/xilinx/xilinx_dma.c | 133 +++++++++++++++++++++++++---------------
 1 file changed, 83 insertions(+), 50 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4fb34da..900bff1 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -163,6 +163,7 @@
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
 #define XILINX_DMA_COALESCE_MAX		255
+#define XILINX_DMA_NUM_DESCS		255
 #define XILINX_DMA_NUM_APP_WORDS	5
 
 /* Multi-Channel DMA Descriptor offsets*/
@@ -310,6 +311,7 @@ struct xilinx_dma_tx_descriptor {
  * @pending_list: Descriptors waiting
  * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
  * @dev: The dma device
@@ -331,7 +333,9 @@ struct xilinx_dma_tx_descriptor {
  * @desc_submitcount: Descriptor h/w submitted count
  * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
  * @start_transfer: Differentiate b/w DMA IP's transfer
  */
 struct xilinx_dma_chan {
@@ -342,6 +346,7 @@ struct xilinx_dma_chan {
 	struct list_head pending_list;
 	struct list_head active_list;
 	struct list_head done_list;
+	struct list_head free_seg_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
 	struct device *dev;
@@ -363,7 +368,9 @@ struct xilinx_dma_chan {
 	u32 desc_submitcount;
 	u32 residue;
 	struct xilinx_axidma_tx_segment *seg_v;
+	dma_addr_t seg_p;
 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
+	dma_addr_t cyclic_seg_p;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
 };
@@ -569,17 +576,31 @@ static struct xilinx_axidma_tx_segment *
 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_axidma_tx_segment *segment;
-	dma_addr_t phys;
-
-	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
-	if (!segment)
-		return NULL;
+	unsigned long flags;
 
-	segment->phys = phys;
+	spin_lock_irqsave(&chan->lock, flags);
+	if (!list_empty(&chan->free_seg_list)) {
+		segment = list_first_entry(&chan->free_seg_list,
+					   struct xilinx_axidma_tx_segment,
+					   node);
+		list_del(&segment->node);
+	}
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return segment;
 }
 
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+	u32 next_desc = hw->next_desc;
+	u32 next_desc_msb = hw->next_desc_msb;
+
+	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+	hw->next_desc = next_desc;
+	hw->next_desc_msb = next_desc_msb;
+}
+
 /**
  * xilinx_dma_free_tx_segment - Free transaction segment
  * @chan: Driver specific DMA channel
@@ -588,7 +609,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
 				struct xilinx_axidma_tx_segment *segment)
 {
-	dma_pool_free(chan->desc_pool, segment, segment->phys);
+	xilinx_dma_clean_hw_desc(&segment->hw);
+
+	list_add_tail(&segment->node, &chan->free_seg_list);
 }
 
 /**
@@ -713,16 +736,26 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
 
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 
 	xilinx_dma_free_descriptors(chan);
+
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
-		xilinx_dma_free_tx_segment(chan, chan->seg_v);
+		spin_lock_irqsave(&chan->lock, flags);
+		INIT_LIST_HEAD(&chan->free_seg_list);
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		/* Free Memory that is allocated for cyclic DMA Mode */
+		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+				  chan->cyclic_seg_v, chan->cyclic_seg_p);
+	}
+
+	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+		dma_pool_destroy(chan->desc_pool);
+		chan->desc_pool = NULL;
 	}
-	dma_pool_destroy(chan->desc_pool);
-	chan->desc_pool = NULL;
 }
 
 /**
@@ -805,6 +838,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	int i;
 
 	/* Has this channel already been allocated? */
 	if (chan->desc_pool)
@@ -815,11 +849,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 * for meeting Xilinx VDMA specification requirement.
 	 */
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
-				   chan->dev,
-				   sizeof(struct xilinx_axidma_tx_segment),
-				   __alignof__(struct xilinx_axidma_tx_segment),
-				   0);
+		/* Allocate the buffer descriptors. */
+		chan->seg_v = dma_zalloc_coherent(chan->dev,
+						  sizeof(*chan->seg_v) *
+						  XILINX_DMA_NUM_DESCS,
+						  &chan->seg_p, GFP_KERNEL);
+		if (!chan->seg_v) {
+			dev_err(chan->dev,
+				"unable to allocate channel %d descriptors\n",
+				chan->id);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+			chan->seg_v[i].hw.next_desc =
+			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+				((i + 1) % XILINX_DMA_NUM_DESCS));
+			chan->seg_v[i].hw.next_desc_msb =
+			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+				((i + 1) % XILINX_DMA_NUM_DESCS));
+			chan->seg_v[i].phys = chan->seg_p +
+				sizeof(*chan->seg_v) * i;
+			list_add_tail(&chan->seg_v[i].node,
+				      &chan->free_seg_list);
+		}
 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
 				   chan->dev,
@@ -834,7 +887,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 				     0);
 	}
 
-	if (!chan->desc_pool) {
+	if (!chan->desc_pool &&
+	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
 		dev_err(chan->dev,
 			"unable to allocate channel %d descriptor pool\n",
 			chan->id);
@@ -843,22 +897,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/*
-		 * For AXI DMA case after submitting a pending_list, keep
-		 * an extra segment allocated so that the "next descriptor"
-		 * pointer on the tail descriptor always points to a
-		 * valid descriptor, even when paused after reaching taildesc.
-		 * This way, it is possible to issue additional
-		 * transfers without halting and restarting the channel.
-		 */
-		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
-		/*
 		 * For cyclic DMA mode we need to program the tail Descriptor
 		 * register with a value which is not a part of the BD chain
 		 * so allocating a desc segment during channel allocation for
 		 * programming tail descriptor.
 		 */
-		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+					sizeof(*chan->cyclic_seg_v),
+					&chan->cyclic_seg_p, GFP_KERNEL);
+		if (!chan->cyclic_seg_v) {
+			dev_err(chan->dev,
+				"unable to allocate desc segment for cyclic DMA\n");
+			return -ENOMEM;
+		}
+		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
 	}
 
 	dma_cookie_init(dchan);
@@ -1198,7 +1250,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+	struct xilinx_axidma_tx_segment *tail_segment;
 	u32 reg;
 
 	if (chan->err)
@@ -1217,21 +1269,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_axidma_tx_segment, node);
 
-	if (chan->has_sg && !chan->xdev->mcdma) {
-		old_head = list_first_entry(&head_desc->segments,
-					struct xilinx_axidma_tx_segment, node);
-		new_head = chan->seg_v;
-		/* Copy Buffer Descriptor fields. */
-		new_head->hw = old_head->hw;
-
-		/* Swap and save new reserve */
-		list_replace_init(&old_head->node, &new_head->node);
-		chan->seg_v = old_head;
-
-		tail_segment->hw.next_desc = chan->seg_v->phys;
-		head_desc->async_tx.phys = new_head->phys;
-	}
-
 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 
 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1729,7 +1766,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	struct xilinx_dma_tx_descriptor *desc;
-	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+	struct xilinx_axidma_tx_segment *segment = NULL;
 	u32 *app_w = (u32 *)context;
 	struct scatterlist *sg;
 	size_t copy;
@@ -1780,10 +1817,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 					       XILINX_DMA_NUM_APP_WORDS);
 			}
 
-			if (prev)
-				prev->hw.next_desc = segment->phys;
-
-			prev = segment;
 			sg_used += copy;
 
 			/*
@@ -1797,7 +1830,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_axidma_tx_segment, node);
 	desc->async_tx.phys = segment->phys;
-	prev->hw.next_desc = segment->phys;
 
 	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
 	if (chan->direction == DMA_MEM_TO_DEV) {
@@ -2341,6 +2373,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
 	INIT_LIST_HEAD(&chan->active_list);
+	INIT_LIST_HEAD(&chan->free_seg_list);
 
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
-- 
2.1.2


* Re: [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
  2017-01-04 13:35 ` [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma Kedareswara rao Appana
@ 2017-01-04 15:26   ` Rob Herring
  2017-01-04 17:00     ` Appana Durga Kedareswara Rao
  0 siblings, 1 reply; 8+ messages in thread
From: Rob Herring @ 2017-01-04 15:26 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	soren.brinkmann, appanad, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu, dmaengine, linux-arm-kernel, linux-kernel,
	devicetree

On Wed, Jan 04, 2017 at 07:05:53PM +0530, Kedareswara rao Appana wrote:
> When VDMA is configured for more than one frame in the h/w
> for example h/w is configured for n number of frames and user
> Submits n number of frames and triggered the DMA using issue_pending API.
> In the current driver flow we are submitting one frame at a time
> but we should submit all the n number of frames at one time as the h/w
> Is configured for n number of frames.

Please fix run-on sentences, capitalization, and word wrapping.

> 
> This patch fixes this issue.
> 
> Reviewed-by: Jose Abreu <joabreu@synopsys.com>
> Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
> ---
> Changes for v4:
> ---> Add Check for framestore configuration on Transmit case as well
>      as suggested by Jose Abreu.
> ---> Modified the dev_dbg checks to dev_warn checks as suggested
>      by Jose Abreu. 
> Changes for v3:
> ---> Added Checks for frame store configuration. If frame store
>      Configuration is not present at the h/w level and user
>      Submits less frames added debug prints in the driver as relevant.
> Changes for v2:
> ---> Fixed race conditions in the driver as suggested by Jose Abreu
> ---> Fixed unnecessray if else checks in the vdma_start_transfer
>      as suggested by Laurent Pinchart.
> 
>  .../devicetree/bindings/dma/xilinx/xilinx_dma.txt  |  2 +
>  drivers/dma/xilinx/xilinx_dma.c                    | 79 +++++++++++++++-------
>  2 files changed, 58 insertions(+), 23 deletions(-)
> 
> diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> index a2b8bfa..1f65e09 100644
> --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> @@ -66,6 +66,8 @@ Optional child node properties:
>  Optional child node properties for VDMA:
>  - xlnx,genlock-mode: Tells Genlock synchronization is
>  	enabled/disabled in hardware.
> +- xlnx,fstore-config: Tells Whether Frame Store Configuration is
> +	enabled/disabled in hardware.

What's the default (when not present)? That should be the most 
common case. Looks like the code treats this as bool, but that's 
not clear here. The name is not clear what it is doing. Enabling or 
disabling the feature?


>  Optional child node properties for AXI DMA:
>  -dma-channels: Number of dma channels in child node.
>  


* RE: [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
  2017-01-04 15:26   ` Rob Herring
@ 2017-01-04 17:00     ` Appana Durga Kedareswara Rao
  2017-01-05 16:39       ` Rob Herring
  0 siblings, 1 reply; 8+ messages in thread
From: Appana Durga Kedareswara Rao @ 2017-01-04 17:00 UTC (permalink / raw)
  To: Rob Herring
  Cc: mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	Soren Brinkmann, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu, dmaengine, linux-arm-kernel, linux-kernel,
	devicetree

Hi Rob,

	Thanks for the review....

> On Wed, Jan 04, 2017 at 07:05:53PM +0530, Kedareswara rao Appana wrote:
> > When VDMA is configured for more than one frame in the h/w for example
> > h/w is configured for n number of frames and user Submits n number of
> > frames and triggered the DMA using issue_pending API.
> > In the current driver flow we are submitting one frame at a time but
> > we should submit all the n number of frames at one time as the h/w Is
> > configured for n number of frames.
> 
> Please fix run-on sentences, capitalization, and word wrapping.

Sure will fix in the next version....

[Snip]
-- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> > +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> > @@ -66,6 +66,8 @@ Optional child node properties:
> >  Optional child node properties for VDMA:
> >  - xlnx,genlock-mode: Tells Genlock synchronization is
> >  	enabled/disabled in hardware.
> > +- xlnx,fstore-config: Tells Whether Frame Store Configuration is
> > +	enabled/disabled in hardware.
> 
> What's the default (when not present)? That should be the most common case.
> Looks like the code treats this as bool, but that's not clear here. The name is not
> clear what it is doing. Enabling or disabling the feature?

The default value is zero...
When this property is present, it tells that the hardware is configured for frame store configuration.

Will fix the explanation part in the next version, like below:
 xlnx,fstore-config: Tells that the hardware is configured for frame store configuration.
Is the above explanation clear???

Regards,
Kedar.


* Re: [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
  2017-01-04 17:00     ` Appana Durga Kedareswara Rao
@ 2017-01-05 16:39       ` Rob Herring
  2017-01-06  6:01         ` Appana Durga Kedareswara Rao
  0 siblings, 1 reply; 8+ messages in thread
From: Rob Herring @ 2017-01-05 16:39 UTC (permalink / raw)
  To: Appana Durga Kedareswara Rao
  Cc: mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	Soren Brinkmann, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu, dmaengine, linux-arm-kernel, linux-kernel,
	devicetree

On Wed, Jan 4, 2017 at 11:00 AM, Appana Durga Kedareswara Rao
<appana.durga.rao@xilinx.com> wrote:
> Hi Rob,
>
>         Thanks for the review....
>
>> On Wed, Jan 04, 2017 at 07:05:53PM +0530, Kedareswara rao Appana wrote:
>> > When VDMA is configured for more than one frame in the h/w for example
>> > h/w is configured for n number of frames and user Submits n number of
>> > frames and triggered the DMA using issue_pending API.
>> > In the current driver flow we are submitting one frame at a time but
>> > we should submit all the n number of frames at one time as the h/w Is
>> > configured for n number of frames.
>>
>> Please fix run-on sentences, capitalization, and word wrapping.
>
> Sure will fix in the next version....
>
> [Snip]
> -- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
>> > +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
>> > @@ -66,6 +66,8 @@ Optional child node properties:
>> >  Optional child node properties for VDMA:
>> >  - xlnx,genlock-mode: Tells Genlock synchronization is
>> >     enabled/disabled in hardware.
>> > +- xlnx,fstore-config: Tells Whether Frame Store Configuration is
>> > +   enabled/disabled in hardware.
>>
>> What's the default (when not present)? That should be the most common case.
>> Looks like the code treats this as bool, but that's not clear here. The name is not
>> clear what it is doing. Enabling or disabling the feature?
>
> Default value is zero...
> When this property is present it tells hardware is configured for frame store configuration.

So most people will not want "frame store configuration"?

> Will fix the explanation part in the next version like below.
>  xlnx,fstore-config: Tells hardware is configured for frame store configuration.
> Is the above explanation clear???

No, I mean make it obvious from the name of the property:
xlnx,fstore-config-enable or xlnx,fstore-enable

And the description needs to say it is boolean.

Rob


* RE: [PATCH v4 2/3] dmaeninge: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
  2017-01-05 16:39       ` Rob Herring
@ 2017-01-06  6:01         ` Appana Durga Kedareswara Rao
  0 siblings, 0 replies; 8+ messages in thread
From: Appana Durga Kedareswara Rao @ 2017-01-06  6:01 UTC (permalink / raw)
  To: Rob Herring
  Cc: mark.rutland, dan.j.williams, vinod.koul, michal.simek,
	Soren Brinkmann, moritz.fischer, laurent.pinchart, luis,
	Jose.Abreu, dmaengine, linux-arm-kernel, linux-kernel,
	devicetree

Hi Rob,

	Thanks for the review...

 [Snip]
> > -- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> >> > +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
> >> > @@ -66,6 +66,8 @@ Optional child node properties:
> >> >  Optional child node properties for VDMA:
> >> >  - xlnx,genlock-mode: Tells Genlock synchronization is
> >> >     enabled/disabled in hardware.
> >> > +- xlnx,fstore-config: Tells Whether Frame Store Configuration is
> >> > +   enabled/disabled in hardware.
> >>
> >> What's the default (when not present)? That should be the most common
> case.
> >> Looks like the code treats this as bool, but that's not clear here.
> >> The name is not clear what it is doing. Enabling or disabling the feature?
> >
> > Default value is zero...
> > When this property is present it tells hardware is configured for frame store
> configuration.
> 
> So most people will not want "frame store configuration"?

Since the driver is for a soft IP (i.e. an FPGA IP), the default h/w configuration of this IP has
the frame store configuration disabled; that is why the driver treats the default value of this
configuration as zero.

If users try a use case where this configuration should be enabled but it is disabled in the h/w,
the driver will warn them to enable the frame store configuration in the h/w,
so that they can enable it in their h/w.

Please let me know if the above explanation is not clear and I will explain in detail...

> 
> > Will fix the explanation part in the next version like below.
> >  xlnx,fstore-config: Tells hardware is configured for frame store configuration.
> > Is the above explanation clear???
> 
> No, I mean make it obvious from the name of the property:
> xlnx,fstore-config-enable or xlnx,fstore-enable
> 
> And the description needs to say it is boolean.

Sure will fix in the next version...

Regards,
Kedar.

> 
> Rob

