All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
@ 2016-02-26 14:03 ` Kedareswara rao Appana
  0 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, anirudh
  Cc: dmaengine, linux-arm-kernel, linux-kernel

The current driver allows user to queue up multiple segments
on to a single transaction descriptor. User will submit this single desc
and in the issue_pending() we decode multiple segments and submit to SG HW engine.
We free up the allocated_desc when it is submitted to the HW.

Existing code prevents the user from preparing multiple transactions at the same
time, as we overwrite the allocated_desc.

The best utilization of the HW SG engine would happen if we collate the pending
list when we start DMA; this patch updates the driver to do so.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> None.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 127 ++++++++++++++++++++++-----------------
 1 file changed, 72 insertions(+), 55 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b501..06bffec 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -190,8 +190,7 @@ struct xilinx_vdma_tx_descriptor {
  * @desc_offset: TX descriptor registers offset
  * @lock: Descriptor operation lock
  * @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
@@ -206,6 +205,7 @@ struct xilinx_vdma_tx_descriptor {
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
  */
 struct xilinx_vdma_chan {
 	struct xilinx_vdma_device *xdev;
@@ -213,8 +213,7 @@ struct xilinx_vdma_chan {
 	u32 desc_offset;
 	spinlock_t lock;
 	struct list_head pending_list;
-	struct xilinx_vdma_tx_descriptor *active_desc;
-	struct xilinx_vdma_tx_descriptor *allocated_desc;
+	struct list_head active_list;
 	struct list_head done_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
@@ -229,6 +228,7 @@ struct xilinx_vdma_chan {
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
+	u32 desc_pendingcount;
 };
 
 /**
@@ -342,19 +342,11 @@ static struct xilinx_vdma_tx_descriptor *
 xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
-
-	if (chan->allocated_desc)
-		return chan->allocated_desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&chan->lock, flags);
-	chan->allocated_desc = desc;
-	spin_unlock_irqrestore(&chan->lock, flags);
-
 	INIT_LIST_HEAD(&desc->segments);
 
 	return desc;
@@ -412,9 +404,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
 
 	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
 	xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
-	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
-	chan->active_desc = NULL;
+	xilinx_vdma_free_desc_list(chan, &chan->active_list);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -614,25 +604,26 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
 	unsigned long flags;
 	u32 reg;
-	struct xilinx_vdma_tx_segment *head, *tail = NULL;
+	struct xilinx_vdma_tx_segment *tail_segment;
 
 	if (chan->err)
 		return;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	/* There's already an active descriptor, bail out. */
-	if (chan->active_desc)
-		goto out_unlock;
-
 	if (list_empty(&chan->pending_list))
 		goto out_unlock;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
 
 	/* If it is SG mode and hardware is busy, cannot submit */
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
@@ -645,14 +636,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
 	 */
-	if (chan->has_sg) {
-		head = list_first_entry(&desc->segments,
-					struct xilinx_vdma_tx_segment, node);
-		tail = list_entry(desc->segments.prev,
-				  struct xilinx_vdma_tx_segment, node);
-
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
-	}
+	if (chan->has_sg)
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+				desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
 	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -694,12 +680,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+				tail_segment->phys);
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(segment, &desc->segments, node) {
+		list_for_each_entry(desc, &chan->pending_list, node) {
+			segment = list_first_entry(&desc->segments,
+					   struct xilinx_vdma_tx_segment, node);
 			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
@@ -716,8 +705,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 	}
 
-	list_del(&desc->node);
-	chan->active_desc = desc;
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -742,21 +731,19 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
  */
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	desc = chan->active_desc;
-	if (!desc) {
-		dev_dbg(chan->dev, "no running descriptors\n");
+	if (list_empty(&chan->active_list))
 		goto out_unlock;
-	}
-
-	dma_cookie_complete(&desc->async_tx);
-	list_add_tail(&desc->node, &chan->done_list);
 
-	chan->active_desc = NULL;
+	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		list_del(&desc->node);
+		dma_cookie_complete(&desc->async_tx);
+		list_add_tail(&desc->node, &chan->done_list);
+	}
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -879,6 +866,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 }
 
 /**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+			      struct xilinx_vdma_tx_descriptor *desc)
+{
+	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_descriptor *tail_desc;
+
+	if (list_empty(&chan->pending_list))
+		goto append;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 */
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
+	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+append:
+	list_add_tail(&desc->node, &chan->pending_list);
+	chan->desc_pendingcount++;
+
+	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+		dev_dbg(chan->dev, "desc pendingcount is too high\n");
+		chan->desc_pendingcount = chan->num_frms;
+	}
+}
+
+/**
  * xilinx_vdma_tx_submit - Submit DMA transaction
  * @tx: Async transaction descriptor
  *
@@ -906,11 +931,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = dma_cookie_assign(tx);
 
-	/* Append the transaction to the pending transactions queue. */
-	list_add_tail(&desc->node, &chan->pending_list);
-
-	/* Free the allocated desc */
-	chan->allocated_desc = NULL;
+	/* Put this transaction onto the tail of the pending queue */
+	append_desc_queue(chan, desc);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -973,13 +995,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	else
 		hw->buf_addr = xt->src_start;
 
-	/* Link the previous next descriptor to current */
-	if (!list_empty(&desc->segments)) {
-		prev = list_last_entry(&desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-		prev->hw.next_desc = segment->phys;
-	}
-
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
 
@@ -988,7 +1003,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	/* Link the last hardware descriptor with the first. */
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_vdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
+	desc->async_tx.phys = segment->phys;
 
 	return &desc->async_tx;
 
@@ -1127,10 +1142,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
 	chan->has_sg = xdev->has_sg;
+	chan->desc_pendingcount = 0x0;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->active_list);
 
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
@ 2016-02-26 14:03 ` Kedareswara rao Appana
  0 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: linux-arm-kernel

The current driver allows user to queue up multiple segments
on to a single transaction descriptor. User will submit this single desc
and in the issue_pending() we decode multiple segments and submit to SG HW engine.
We free up the allocated_desc when it is submitted to the HW.

Existing code prevents the user from preparing multiple transactions at the same
time, as we overwrite the allocated_desc.

The best utilization of the HW SG engine would happen if we collate the pending
list when we start DMA; this patch updates the driver to do so.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> None.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 127 ++++++++++++++++++++++-----------------
 1 file changed, 72 insertions(+), 55 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b501..06bffec 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -190,8 +190,7 @@ struct xilinx_vdma_tx_descriptor {
  * @desc_offset: TX descriptor registers offset
  * @lock: Descriptor operation lock
  * @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
@@ -206,6 +205,7 @@ struct xilinx_vdma_tx_descriptor {
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
  */
 struct xilinx_vdma_chan {
 	struct xilinx_vdma_device *xdev;
@@ -213,8 +213,7 @@ struct xilinx_vdma_chan {
 	u32 desc_offset;
 	spinlock_t lock;
 	struct list_head pending_list;
-	struct xilinx_vdma_tx_descriptor *active_desc;
-	struct xilinx_vdma_tx_descriptor *allocated_desc;
+	struct list_head active_list;
 	struct list_head done_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
@@ -229,6 +228,7 @@ struct xilinx_vdma_chan {
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
+	u32 desc_pendingcount;
 };
 
 /**
@@ -342,19 +342,11 @@ static struct xilinx_vdma_tx_descriptor *
 xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
-
-	if (chan->allocated_desc)
-		return chan->allocated_desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&chan->lock, flags);
-	chan->allocated_desc = desc;
-	spin_unlock_irqrestore(&chan->lock, flags);
-
 	INIT_LIST_HEAD(&desc->segments);
 
 	return desc;
@@ -412,9 +404,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
 
 	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
 	xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
-	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
-	chan->active_desc = NULL;
+	xilinx_vdma_free_desc_list(chan, &chan->active_list);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -614,25 +604,26 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
 	unsigned long flags;
 	u32 reg;
-	struct xilinx_vdma_tx_segment *head, *tail = NULL;
+	struct xilinx_vdma_tx_segment *tail_segment;
 
 	if (chan->err)
 		return;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	/* There's already an active descriptor, bail out. */
-	if (chan->active_desc)
-		goto out_unlock;
-
 	if (list_empty(&chan->pending_list))
 		goto out_unlock;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
 
 	/* If it is SG mode and hardware is busy, cannot submit */
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
@@ -645,14 +636,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
 	 */
-	if (chan->has_sg) {
-		head = list_first_entry(&desc->segments,
-					struct xilinx_vdma_tx_segment, node);
-		tail = list_entry(desc->segments.prev,
-				  struct xilinx_vdma_tx_segment, node);
-
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
-	}
+	if (chan->has_sg)
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+				desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
 	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -694,12 +680,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+				tail_segment->phys);
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(segment, &desc->segments, node) {
+		list_for_each_entry(desc, &chan->pending_list, node) {
+			segment = list_first_entry(&desc->segments,
+					   struct xilinx_vdma_tx_segment, node);
 			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
@@ -716,8 +705,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 	}
 
-	list_del(&desc->node);
-	chan->active_desc = desc;
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -742,21 +731,19 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
  */
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_vdma_tx_descriptor *desc, *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	desc = chan->active_desc;
-	if (!desc) {
-		dev_dbg(chan->dev, "no running descriptors\n");
+	if (list_empty(&chan->active_list))
 		goto out_unlock;
-	}
-
-	dma_cookie_complete(&desc->async_tx);
-	list_add_tail(&desc->node, &chan->done_list);
 
-	chan->active_desc = NULL;
+	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		list_del(&desc->node);
+		dma_cookie_complete(&desc->async_tx);
+		list_add_tail(&desc->node, &chan->done_list);
+	}
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -879,6 +866,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 }
 
 /**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+			      struct xilinx_vdma_tx_descriptor *desc)
+{
+	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_descriptor *tail_desc;
+
+	if (list_empty(&chan->pending_list))
+		goto append;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 */
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
+	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+append:
+	list_add_tail(&desc->node, &chan->pending_list);
+	chan->desc_pendingcount++;
+
+	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+		dev_dbg(chan->dev, "desc pendingcount is too high\n");
+		chan->desc_pendingcount = chan->num_frms;
+	}
+}
+
+/**
  * xilinx_vdma_tx_submit - Submit DMA transaction
  * @tx: Async transaction descriptor
  *
@@ -906,11 +931,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	cookie = dma_cookie_assign(tx);
 
-	/* Append the transaction to the pending transactions queue. */
-	list_add_tail(&desc->node, &chan->pending_list);
-
-	/* Free the allocated desc */
-	chan->allocated_desc = NULL;
+	/* Put this transaction onto the tail of the pending queue */
+	append_desc_queue(chan, desc);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -973,13 +995,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	else
 		hw->buf_addr = xt->src_start;
 
-	/* Link the previous next descriptor to current */
-	if (!list_empty(&desc->segments)) {
-		prev = list_last_entry(&desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-		prev->hw.next_desc = segment->phys;
-	}
-
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
 
@@ -988,7 +1003,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	/* Link the last hardware descriptor with the first. */
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_vdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
+	desc->async_tx.phys = segment->phys;
 
 	return &desc->async_tx;
 
@@ -1127,10 +1142,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
 	chan->has_sg = xdev->has_sg;
+	chan->desc_pendingcount = 0x0;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->active_list);
 
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
  2016-02-26 14:03 ` Kedareswara rao Appana
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  -1 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, anirudh
  Cc: dmaengine, linux-arm-kernel, linux-kernel

This patch simplifies the spin lock handling in the driver
by moving locking out of xilinx_dma_start_transfer() API
and xilinx_dma_update_completed_cookie() API.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> Updated commit message as suggested by vinod.
---> Added description to the API's(start_transfer/complete_cookie) regarding
locking as suggested by vinod.
Changes for v2:
---> split the changes into multiple patches

 drivers/dma/xilinx/xilinx_vdma.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 06bffec..ce330d4 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -605,17 +605,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
-	unsigned long flags;
 	u32 reg;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
+	/* This function was invoked with lock held */
 	if (chan->err)
 		return;
 
-	spin_lock_irqsave(&chan->lock, flags);
-
 	if (list_empty(&chan->pending_list))
-		goto out_unlock;
+		return;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
@@ -629,7 +627,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
 	    !xilinx_vdma_is_idle(chan)) {
 		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -676,7 +674,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	xilinx_vdma_start(chan);
 
 	if (chan->err)
-		goto out_unlock;
+		return;
 
 	/* Start the transfer */
 	if (chan->has_sg) {
@@ -696,7 +694,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		}
 
 		if (!last)
-			goto out_unlock;
+			return;
 
 		/* HW expects these parameters to be same for one transaction */
 		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -707,9 +705,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -719,8 +714,11 @@ out_unlock:
 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 {
 	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
 
+	spin_lock_irqsave(&chan->lock, flags);
 	xilinx_vdma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -732,21 +730,16 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc, *next;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->lock, flags);
 
+	/* This function was invoked with lock held */
 	if (list_empty(&chan->active_list))
-		goto out_unlock;
+		return;
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
 		list_del(&desc->node);
 		dma_cookie_complete(&desc->async_tx);
 		list_add_tail(&desc->node, &chan->done_list);
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -857,8 +850,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+		spin_lock(&chan->lock);
 		xilinx_vdma_complete_descriptor(chan);
 		xilinx_vdma_start_transfer(chan);
+		spin_unlock(&chan->lock);
 	}
 
 	tasklet_schedule(&chan->tasklet);
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  0 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: linux-arm-kernel

This patch simplifies the spin lock handling in the driver
by moving locking out of xilinx_dma_start_transfer() API
and xilinx_dma_update_completed_cookie() API.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> Updated commit message as suggested by vinod.
---> Added description to the API's(start_transfer/complete_cookie) regarding
locking as suggested by vinod.
Changes for v2:
---> split the changes into multiple patches

 drivers/dma/xilinx/xilinx_vdma.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 06bffec..ce330d4 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -605,17 +605,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
-	unsigned long flags;
 	u32 reg;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
+	/* This function was invoked with lock held */
 	if (chan->err)
 		return;
 
-	spin_lock_irqsave(&chan->lock, flags);
-
 	if (list_empty(&chan->pending_list))
-		goto out_unlock;
+		return;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
@@ -629,7 +627,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
 	    !xilinx_vdma_is_idle(chan)) {
 		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -676,7 +674,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	xilinx_vdma_start(chan);
 
 	if (chan->err)
-		goto out_unlock;
+		return;
 
 	/* Start the transfer */
 	if (chan->has_sg) {
@@ -696,7 +694,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		}
 
 		if (!last)
-			goto out_unlock;
+			return;
 
 		/* HW expects these parameters to be same for one transaction */
 		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -707,9 +705,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -719,8 +714,11 @@ out_unlock:
 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 {
 	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
 
+	spin_lock_irqsave(&chan->lock, flags);
 	xilinx_vdma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -732,21 +730,16 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc, *next;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->lock, flags);
 
+	/* This function was invoked with lock held */
 	if (list_empty(&chan->active_list))
-		goto out_unlock;
+		return;
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
 		list_del(&desc->node);
 		dma_cookie_complete(&desc->async_tx);
 		list_add_tail(&desc->node, &chan->done_list);
 	}
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
@@ -857,8 +850,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+		spin_lock(&chan->lock);
 		xilinx_vdma_complete_descriptor(chan);
 		xilinx_vdma_start_transfer(chan);
+		spin_unlock(&chan->lock);
 	}
 
 	tasklet_schedule(&chan->tasklet);
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode
  2016-02-26 14:03 ` Kedareswara rao Appana
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  -1 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, anirudh
  Cc: dmaengine, linux-arm-kernel, linux-kernel

This patch fixes issues with the non-parking mode (circular mode).
With the existing driver in circular mode, if we submit fewer frames than the
h/w is configured for, we simply end up with misconfigured VDMA h/w.
This patch fixes this issue by configuring the frame count register.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> None.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index ce330d4..70b2b32 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -646,6 +646,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	else
 		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
 
+	/* Configure channel to allow number frame buffers */
+	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+			chan->desc_pendingcount);
+
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  0 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: linux-arm-kernel

This patch fixes issues with the non-parking mode (circular mode).
With the existing driver in circular mode, if we submit fewer frames than the
h/w is configured for, we simply end up with misconfigured VDMA h/w.
This patch fixes this issue by configuring the frame count register.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> None.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index ce330d4..70b2b32 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -646,6 +646,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	else
 		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
 
+	/* Configure channel to allow number frame buffers */
+	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+			chan->desc_pendingcount);
+
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
  2016-02-26 14:03 ` Kedareswara rao Appana
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  -1 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
	appanad, moritz.fischer, laurent.pinchart, luis, anirudh
  Cc: dmaengine, linux-arm-kernel, linux-kernel

It is sometimes necessary to poll a memory-mapped register until its
value satisfies some condition. Use the convenience macros
that do this instead of do-while loops.

This patch updates the same in the driver.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> removed patch dmaengine: xilinx_vdma: Improve channel idle checking
from the series as it is not a valid patch.
---> Added this patch.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 46 +++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 70b2b32..bc2ca45 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_dma.h>
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
 	container_of(chan, struct xilinx_vdma_chan, common)
 #define to_vdma_tx_descriptor(tx) \
 	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+			   cond, delay_us, timeout_us)
 
 /* IO accessors */
 static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -550,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to halt */
-	do {
-		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		    XILINX_VDMA_DMASR_HALTED)
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      (val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 		chan->err = true;
@@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to start */
-	do {
-		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		      XILINX_VDMA_DMASR_HALTED))
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 
@@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
  */
 static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
 	u32 tmp;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
 
-	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-		XILINX_VDMA_DMACR_RESET;
-
 	/* Wait for the hardware to finish reset */
-	do {
-		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-			XILINX_VDMA_DMACR_RESET;
-	} while (loop-- && tmp);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -777,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 
 	chan->err = false;
 
-	return 0;
+	return err;
 }
 
 /**
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
@ 2016-02-26 14:03   ` Kedareswara rao Appana
  0 siblings, 0 replies; 16+ messages in thread
From: Kedareswara rao Appana @ 2016-02-26 14:03 UTC (permalink / raw)
  To: linux-arm-kernel

It is sometimes necessary to poll a memory-mapped register until its
value satisfies some condition. Use the convenience macros
that do this instead of do-while loops.

This patch updates the same in the driver.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v3:
---> removed patch dmaengine: xilinx_vdma: Improve channel idle checking
from the series as it is not a valid patch.
---> Added this patch.
Changes for v2:
---> split the changes into multiple patches.

 drivers/dma/xilinx/xilinx_vdma.c | 46 +++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 70b2b32..bc2ca45 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_dma.h>
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
 	container_of(chan, struct xilinx_vdma_chan, common)
 #define to_vdma_tx_descriptor(tx) \
 	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+			   cond, delay_us, timeout_us)
 
 /* IO accessors */
 static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -550,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to halt */
-	do {
-		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		    XILINX_VDMA_DMASR_HALTED)
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      (val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 		chan->err = true;
@@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
+	u32 val;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to start */
-	do {
-		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		      XILINX_VDMA_DMASR_HALTED))
-			break;
-	} while (loop--);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 
@@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
  */
 static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err = 0;
 	u32 tmp;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
 
-	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-		XILINX_VDMA_DMACR_RESET;
-
 	/* Wait for the hardware to finish reset */
-	do {
-		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-			XILINX_VDMA_DMACR_RESET;
-	} while (loop-- && tmp);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+				      XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -777,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 
 	chan->err = false;
 
-	return 0;
+	return err;
 }
 
 /**
-- 
2.1.2

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
  2016-02-26 14:03   ` Kedareswara rao Appana
@ 2016-03-03 15:29     ` Vinod Koul
  -1 siblings, 0 replies; 16+ messages in thread
From: Vinod Koul @ 2016-03-03 15:29 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
	moritz.fischer, laurent.pinchart, luis, anirudh, dmaengine,
	linux-arm-kernel, linux-kernel

On Fri, Feb 26, 2016 at 07:33:54PM +0530, Kedareswara rao Appana wrote:

>  static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;
> +	u32 val;
>  
>  	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
>  
>  	/* Wait for the hardware to halt */
> -	do {
> -		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> -		    XILINX_VDMA_DMASR_HALTED)
> -			break;
> -	} while (loop--);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> +				      (val & XILINX_VDMA_DMASR_HALTED), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {
>  		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
>  			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
>  		chan->err = true;
> @@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
>   */
>  static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;

why is this initialization required here and other places?

> +	u32 val;
>  
>  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
>  
>  	/* Wait for the hardware to start */
> -	do {
> -		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> -		      XILINX_VDMA_DMASR_HALTED))
> -			break;
> -	} while (loop--);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> +				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {
>  		dev_err(chan->dev, "Cannot start channel %p: %x\n",
>  			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
>  
> @@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
>   */
>  static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;
>  	u32 tmp;
>  
>  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
>  
> -	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> -		XILINX_VDMA_DMACR_RESET;
> -
>  	/* Wait for the hardware to finish reset */
> -	do {
> -		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> -			XILINX_VDMA_DMACR_RESET;
> -	} while (loop-- && tmp);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
> +				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {



-- 
~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
@ 2016-03-03 15:29     ` Vinod Koul
  0 siblings, 0 replies; 16+ messages in thread
From: Vinod Koul @ 2016-03-03 15:29 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Feb 26, 2016 at 07:33:54PM +0530, Kedareswara rao Appana wrote:

>  static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;
> +	u32 val;
>  
>  	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
>  
>  	/* Wait for the hardware to halt */
> -	do {
> -		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> -		    XILINX_VDMA_DMASR_HALTED)
> -			break;
> -	} while (loop--);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> +				      (val & XILINX_VDMA_DMASR_HALTED), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {
>  		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
>  			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
>  		chan->err = true;
> @@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
>   */
>  static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;

why is this initialization required here and other places?

> +	u32 val;
>  
>  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
>  
>  	/* Wait for the hardware to start */
> -	do {
> -		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> -		      XILINX_VDMA_DMASR_HALTED))
> -			break;
> -	} while (loop--);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> +				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {
>  		dev_err(chan->dev, "Cannot start channel %p: %x\n",
>  			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
>  
> @@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
>   */
>  static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
>  {
> -	int loop = XILINX_VDMA_LOOP_COUNT;
> +	int err = 0;
>  	u32 tmp;
>  
>  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
>  
> -	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> -		XILINX_VDMA_DMACR_RESET;
> -
>  	/* Wait for the hardware to finish reset */
> -	do {
> -		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> -			XILINX_VDMA_DMACR_RESET;
> -	} while (loop-- && tmp);
> +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
> +				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
> +				      XILINX_VDMA_LOOP_COUNT);
>  
> -	if (!loop) {
> +	if (err) {



-- 
~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
  2016-02-26 14:03 ` Kedareswara rao Appana
@ 2016-03-03 15:33   ` Vinod Koul
  -1 siblings, 0 replies; 16+ messages in thread
From: Vinod Koul @ 2016-03-03 15:33 UTC (permalink / raw)
  To: Kedareswara rao Appana
  Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
	moritz.fischer, laurent.pinchart, luis, anirudh, dmaengine,
	linux-arm-kernel, linux-kernel

On Fri, Feb 26, 2016 at 07:33:51PM +0530, Kedareswara rao Appana wrote:
> The current driver allows user to queue up multiple segments
> on to a single transaction descriptor. User will submit this single desc
> and in the issue_pending() we decode multiple segments and submit to SG HW engine.
> We free up the allocated_desc when it is submitted to the HW.
> 
> Existing code prevents the user to prepare multiple trasactions at same time as
> we are overwrite with the allocated_desc.
> 
> The best utilization of HW SG engine would happen if we collate the pending
> list when we start dma this patch updates the same.

Applied all. It is usually advisable to do cover letter using --cover-letter
for multi patch series

-- 
~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
@ 2016-03-03 15:33   ` Vinod Koul
  0 siblings, 0 replies; 16+ messages in thread
From: Vinod Koul @ 2016-03-03 15:33 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Feb 26, 2016 at 07:33:51PM +0530, Kedareswara rao Appana wrote:
> The current driver allows user to queue up multiple segments
> on to a single transaction descriptor. User will submit this single desc
> and in the issue_pending() we decode multiple segments and submit to SG HW engine.
> We free up the allocated_desc when it is submitted to the HW.
> 
> Existing code prevents the user to prepare multiple trasactions at same time as
> we are overwrite with the allocated_desc.
> 
> The best utilization of HW SG engine would happen if we collate the pending
> list when we start dma this patch updates the same.

Applied all. It is usually advisable to do cover letter using --cover-letter
for multi patch series

-- 
~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
  2016-03-03 15:33   ` Vinod Koul
@ 2016-03-03 16:28     ` Appana Durga Kedareswara Rao
  -1 siblings, 0 replies; 16+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-03-03 16:28 UTC (permalink / raw)
  To: Vinod Koul
  Cc: dan.j.williams, Michal Simek, Soren Brinkmann, moritz.fischer,
	laurent.pinchart, luis, Anirudha Sarangi, dmaengine,
	linux-arm-kernel, linux-kernel

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: Thursday, March 03, 2016 9:03 PM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams@intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer@ettus.com;
> laurent.pinchart@ideasonboard.com; luis@debethencourt.com; Anirudha
> Sarangi; dmaengine@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine
> handling
> 
> On Fri, Feb 26, 2016 at 07:33:51PM +0530, Kedareswara rao Appana wrote:
> > The current driver allows user to queue up multiple segments on to a
> > single transaction descriptor. User will submit this single desc and
> > in the issue_pending() we decode multiple segments and submit to SG HW
> engine.
> > We free up the allocated_desc when it is submitted to the HW.
> >
> > Existing code prevents the user to prepare multiple trasactions at
> > same time as we are overwrite with the allocated_desc.
> >
> > The best utilization of HW SG engine would happen if we collate the
> > pending list when we start dma this patch updates the same.
> 
> Applied all. It is usually advisable to do cover letter using --cover-letter for multi
> patch series

Thanks ... Will fix next time onwards...

Regards,
Kedar.

> 
> --
> ~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
@ 2016-03-03 16:28     ` Appana Durga Kedareswara Rao
  0 siblings, 0 replies; 16+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-03-03 16:28 UTC (permalink / raw)
  To: linux-arm-kernel

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul at intel.com]
> Sent: Thursday, March 03, 2016 9:03 PM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams at intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer at ettus.com;
> laurent.pinchart at ideasonboard.com; luis at debethencourt.com; Anirudha
> Sarangi; dmaengine at vger.kernel.org; linux-arm-kernel at lists.infradead.org;
> linux-kernel at vger.kernel.org
> Subject: Re: [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine
> handling
> 
> On Fri, Feb 26, 2016 at 07:33:51PM +0530, Kedareswara rao Appana wrote:
> > The current driver allows user to queue up multiple segments on to a
> > single transaction descriptor. User will submit this single desc and
> > in the issue_pending() we decode multiple segments and submit to SG HW
> engine.
> > We free up the allocated_desc when it is submitted to the HW.
> >
> > Existing code prevents the user to prepare multiple trasactions at
> > same time as we are overwrite with the allocated_desc.
> >
> > The best utilization of HW SG engine would happen if we collate the
> > pending list when we start dma this patch updates the same.
> 
> Applied all. It is usually advisable to do cover letter using --cover-letter for multi
> patch series

Thanks ... Will fix next time onwards...

Regards,
Kedar.

> 
> --
> ~Vinod

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
  2016-03-03 15:29     ` Vinod Koul
@ 2016-03-03 17:12       ` Appana Durga Kedareswara Rao
  -1 siblings, 0 replies; 16+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-03-03 17:12 UTC (permalink / raw)
  To: Vinod Koul
  Cc: dan.j.williams, Michal Simek, Soren Brinkmann, moritz.fischer,
	laurent.pinchart, luis, Anirudha Sarangi, dmaengine,
	linux-arm-kernel, linux-kernel

Hi Vinod,


> -----Original Message-----
> From: dmaengine-owner@vger.kernel.org [mailto:dmaengine-
> owner@vger.kernel.org] On Behalf Of Vinod Koul
> Sent: Thursday, March 03, 2016 9:00 PM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams@intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer@ettus.com;
> laurent.pinchart@ideasonboard.com; luis@debethencourt.com; Anirudha
> Sarangi; dmaengine@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout
> instead of do while loop's
> 
> On Fri, Feb 26, 2016 at 07:33:54PM +0530, Kedareswara rao Appana wrote:
> 
> >  static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> > +	u32 val;
> >
> >  	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
> > XILINX_VDMA_DMACR_RUNSTOP);
> >
> >  	/* Wait for the hardware to halt */
> > -	do {
> > -		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > -		    XILINX_VDMA_DMASR_HALTED)
> > -			break;
> > -	} while (loop--);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> > +				      (val & XILINX_VDMA_DMASR_HALTED), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> >  		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
> >  			chan, vdma_ctrl_read(chan,
> XILINX_VDMA_REG_DMASR));
> >  		chan->err = true;
> > @@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct
> xilinx_vdma_chan *chan)
> >   */
> >  static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> 
> why is this initialization required here and other places?

Yes, the initialization is not required. In the other mail you said you already applied this patch series.
Will send a separate patch to fix it.

Regards,
Kedar.

> 
> > +	u32 val;
> >
> >  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
> > XILINX_VDMA_DMACR_RUNSTOP);
> >
> >  	/* Wait for the hardware to start */
> > -	do {
> > -		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > -		      XILINX_VDMA_DMASR_HALTED))
> > -			break;
> > -	} while (loop--);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> > +				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> >  		dev_err(chan->dev, "Cannot start channel %p: %x\n",
> >  			chan, vdma_ctrl_read(chan,
> XILINX_VDMA_REG_DMASR));
> >
> > @@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct
> xilinx_vdma_chan *chan)
> >   */
> >  static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> >  	u32 tmp;
> >
> >  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
> XILINX_VDMA_DMACR_RESET);
> >
> > -	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> > -		XILINX_VDMA_DMACR_RESET;
> > -
> >  	/* Wait for the hardware to finish reset */
> > -	do {
> > -		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> > -			XILINX_VDMA_DMACR_RESET;
> > -	} while (loop-- && tmp);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
> > +				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> 
> 
> 
> --
> ~Vinod
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the body
> of a message to majordomo@vger.kernel.org More majordomo info at
> http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
@ 2016-03-03 17:12       ` Appana Durga Kedareswara Rao
  0 siblings, 0 replies; 16+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-03-03 17:12 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Vinod,


> -----Original Message-----
> From: dmaengine-owner at vger.kernel.org [mailto:dmaengine-
> owner at vger.kernel.org] On Behalf Of Vinod Koul
> Sent: Thursday, March 03, 2016 9:00 PM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams at intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer at ettus.com;
> laurent.pinchart at ideasonboard.com; luis at debethencourt.com; Anirudha
> Sarangi; dmaengine at vger.kernel.org; linux-arm-kernel at lists.infradead.org;
> linux-kernel at vger.kernel.org
> Subject: Re: [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout
> instead of do while loop's
> 
> On Fri, Feb 26, 2016 at 07:33:54PM +0530, Kedareswara rao Appana wrote:
> 
> >  static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> > +	u32 val;
> >
> >  	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
> > XILINX_VDMA_DMACR_RUNSTOP);
> >
> >  	/* Wait for the hardware to halt */
> > -	do {
> > -		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > -		    XILINX_VDMA_DMASR_HALTED)
> > -			break;
> > -	} while (loop--);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> > +				      (val & XILINX_VDMA_DMASR_HALTED), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> >  		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
> >  			chan, vdma_ctrl_read(chan,
> XILINX_VDMA_REG_DMASR));
> >  		chan->err = true;
> > @@ -576,18 +579,17 @@ static void xilinx_vdma_halt(struct
> xilinx_vdma_chan *chan)
> >   */
> >  static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> 
> why is this initialization required here and other places?

Yes, the initialization is not required. In the other mail you said you already applied this patch series.
Will send a separate patch to fix it.

Regards,
Kedar.

> 
> > +	u32 val;
> >
> >  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
> > XILINX_VDMA_DMACR_RUNSTOP);
> >
> >  	/* Wait for the hardware to start */
> > -	do {
> > -		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > -		      XILINX_VDMA_DMASR_HALTED))
> > -			break;
> > -	} while (loop--);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
> > +				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> >  		dev_err(chan->dev, "Cannot start channel %p: %x\n",
> >  			chan, vdma_ctrl_read(chan,
> XILINX_VDMA_REG_DMASR));
> >
> > @@ -754,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct
> xilinx_vdma_chan *chan)
> >   */
> >  static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)  {
> > -	int loop = XILINX_VDMA_LOOP_COUNT;
> > +	int err = 0;
> >  	u32 tmp;
> >
> >  	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
> XILINX_VDMA_DMACR_RESET);
> >
> > -	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> > -		XILINX_VDMA_DMACR_RESET;
> > -
> >  	/* Wait for the hardware to finish reset */
> > -	do {
> > -		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> > -			XILINX_VDMA_DMACR_RESET;
> > -	} while (loop-- && tmp);
> > +	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
> > +				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
> > +				      XILINX_VDMA_LOOP_COUNT);
> >
> > -	if (!loop) {
> > +	if (err) {
> 
> 
> 
> --
> ~Vinod
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the body
> of a message to majordomo at vger.kernel.org More majordomo info at
> http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2016-03-03 17:27 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-26 14:03 [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Kedareswara rao Appana
2016-02-26 14:03 ` Kedareswara rao Appana
2016-02-26 14:03 ` [PATCH v3 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
2016-02-26 14:03   ` Kedareswara rao Appana
2016-02-26 14:03 ` [PATCH v3 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode Kedareswara rao Appana
2016-02-26 14:03   ` Kedareswara rao Appana
2016-02-26 14:03 ` [PATCH v3 4/4] dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's Kedareswara rao Appana
2016-02-26 14:03   ` Kedareswara rao Appana
2016-03-03 15:29   ` Vinod Koul
2016-03-03 15:29     ` Vinod Koul
2016-03-03 17:12     ` Appana Durga Kedareswara Rao
2016-03-03 17:12       ` Appana Durga Kedareswara Rao
2016-03-03 15:33 ` [PATCH v3 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Vinod Koul
2016-03-03 15:33   ` Vinod Koul
2016-03-03 16:28   ` Appana Durga Kedareswara Rao
2016-03-03 16:28     ` Appana Durga Kedareswara Rao

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.