* [PATCH v2 1/4] dmaengine: xilinx_vdma: Improve SG engine handling
@ 2016-02-22 5:54 Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
` (2 more replies)
0 siblings, 3 replies; 8+ messages in thread
From: Kedareswara rao Appana @ 2016-02-22 5:54 UTC (permalink / raw)
To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
appanad, moritz.fischer, laurent.pinchart, luis, anirudh
Cc: dmaengine, linux-arm-kernel, linux-kernel
The current driver allows user to queue up multiple segments
on to a single transaction descriptor. User will submit this single desc
and in the issue_pending() we decode multiple segments and submit to SG HW engine.
We free up the allocated_desc when it is submitted to the HW.
Existing code prevents the user from preparing multiple transactions at the
same time, as we overwrite the allocated_desc.
The best utilization of the HW SG engine would happen if we collate the pending
list when we start the DMA; this patch updates the driver to do so.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
---> split the changes into multiple patches.
drivers/dma/xilinx/xilinx_vdma.c | 127 ++++++++++++++++++++++-----------------
1 file changed, 72 insertions(+), 55 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b501..06bffec 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -190,8 +190,7 @@ struct xilinx_vdma_tx_descriptor {
* @desc_offset: TX descriptor registers offset
* @lock: Descriptor operation lock
* @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
@@ -206,6 +205,7 @@ struct xilinx_vdma_tx_descriptor {
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
*/
struct xilinx_vdma_chan {
struct xilinx_vdma_device *xdev;
@@ -213,8 +213,7 @@ struct xilinx_vdma_chan {
u32 desc_offset;
spinlock_t lock;
struct list_head pending_list;
- struct xilinx_vdma_tx_descriptor *active_desc;
- struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head active_list;
struct list_head done_list;
struct dma_chan common;
struct dma_pool *desc_pool;
@@ -229,6 +228,7 @@ struct xilinx_vdma_chan {
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
+ u32 desc_pendingcount;
};
/**
@@ -342,19 +342,11 @@ static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
-
- if (chan->allocated_desc)
- return chan->allocated_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
- spin_lock_irqsave(&chan->lock, flags);
- chan->allocated_desc = desc;
- spin_unlock_irqrestore(&chan->lock, flags);
-
INIT_LIST_HEAD(&desc->segments);
return desc;
@@ -412,9 +404,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
xilinx_vdma_free_desc_list(chan, &chan->pending_list);
xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
- xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
- chan->active_desc = NULL;
+ xilinx_vdma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -614,25 +604,26 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
unsigned long flags;
u32 reg;
- struct xilinx_vdma_tx_segment *head, *tail = NULL;
+ struct xilinx_vdma_tx_segment *tail_segment;
if (chan->err)
return;
spin_lock_irqsave(&chan->lock, flags);
- /* There's already an active descriptor, bail out. */
- if (chan->active_desc)
- goto out_unlock;
-
if (list_empty(&chan->pending_list))
goto out_unlock;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
@@ -645,14 +636,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
*/
- if (chan->has_sg) {
- head = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- tail = list_entry(desc->segments.prev,
- struct xilinx_vdma_tx_segment, node);
-
- vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
- }
+ if (chan->has_sg)
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+ desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -694,12 +680,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
/* Start the transfer */
if (chan->has_sg) {
- vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+ tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
- list_for_each_entry(segment, &desc->segments, node) {
+ list_for_each_entry(desc, &chan->pending_list, node) {
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
@@ -716,8 +705,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
}
- list_del(&desc->node);
- chan->active_desc = desc;
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
out_unlock:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -742,21 +731,19 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
*/
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_descriptor *desc, *next;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
- desc = chan->active_desc;
- if (!desc) {
- dev_dbg(chan->dev, "no running descriptors\n");
+ if (list_empty(&chan->active_list))
goto out_unlock;
- }
-
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
- chan->active_desc = NULL;
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+ }
out_unlock:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -879,6 +866,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
/**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_descriptor *tail_desc;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
* xilinx_vdma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
*
@@ -906,11 +931,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
- /* Append the transaction to the pending transactions queue. */
- list_add_tail(&desc->node, &chan->pending_list);
-
- /* Free the allocated desc */
- chan->allocated_desc = NULL;
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +995,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
else
hw->buf_addr = xt->src_start;
- /* Link the previous next descriptor to current */
- if (!list_empty(&desc->segments)) {
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
- }
-
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1003,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ desc->async_tx.phys = segment->phys;
return &desc->async_tx;
@@ -1127,10 +1142,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
+ chan->desc_pendingcount = 0x0;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
--
2.1.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
2016-02-22 5:54 [PATCH v2 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Kedareswara rao Appana
@ 2016-02-22 5:54 ` Kedareswara rao Appana
2016-02-23 3:09 ` Vinod Koul
2016-02-22 5:54 ` [PATCH v2 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking Kedareswara rao Appana
2 siblings, 1 reply; 8+ messages in thread
From: Kedareswara rao Appana @ 2016-02-22 5:54 UTC (permalink / raw)
To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
appanad, moritz.fischer, laurent.pinchart, luis, anirudh
Cc: dmaengine, linux-arm-kernel, linux-kernel
This patch simplifies the spin lock handling in the driver.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
---> split the changes into multiple patches.
drivers/dma/xilinx/xilinx_vdma.c | 27 ++++++++++-----------------
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 06bffec..d646218 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -605,17 +605,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
- unsigned long flags;
u32 reg;
struct xilinx_vdma_tx_segment *tail_segment;
if (chan->err)
return;
- spin_lock_irqsave(&chan->lock, flags);
-
if (list_empty(&chan->pending_list))
- goto out_unlock;
+ return;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
@@ -629,7 +626,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
!xilinx_vdma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ return;
}
/*
@@ -676,7 +673,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
xilinx_vdma_start(chan);
if (chan->err)
- goto out_unlock;
+ return;
/* Start the transfer */
if (chan->has_sg) {
@@ -696,7 +693,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
}
if (!last)
- goto out_unlock;
+ return;
/* HW expects these parameters to be same for one transaction */
vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -707,9 +704,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -719,8 +713,11 @@ out_unlock:
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+ spin_lock_irqsave(&chan->lock, flags);
xilinx_vdma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -732,21 +729,15 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc, *next;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
if (list_empty(&chan->active_list))
- goto out_unlock;
+ return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
list_del(&desc->node);
dma_cookie_complete(&desc->async_tx);
list_add_tail(&desc->node, &chan->done_list);
}
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -857,8 +848,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
xilinx_vdma_complete_descriptor(chan);
xilinx_vdma_start_transfer(chan);
+ spin_unlock(&chan->lock);
}
tasklet_schedule(&chan->tasklet);
--
2.1.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
2016-02-22 5:54 ` [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
@ 2016-02-23 3:09 ` Vinod Koul
2016-02-23 4:19 ` Appana Durga Kedareswara Rao
0 siblings, 1 reply; 8+ messages in thread
From: Vinod Koul @ 2016-02-23 3:09 UTC (permalink / raw)
To: Kedareswara rao Appana
Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
moritz.fischer, laurent.pinchart, luis, anirudh, dmaengine,
linux-arm-kernel, linux-kernel
On Mon, Feb 22, 2016 at 11:24:35AM +0530, Kedareswara rao Appana wrote:
> This patch simplifies the spin lock handling in the driver.
But sadly doesn't describe how?
>
> Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
> ---
> Changes for v2:
> ---> splitted the changes into multiple patches.
>
> drivers/dma/xilinx/xilinx_vdma.c | 27 ++++++++++-----------------
> 1 file changed, 10 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
> index 06bffec..d646218 100644
> --- a/drivers/dma/xilinx/xilinx_vdma.c
> +++ b/drivers/dma/xilinx/xilinx_vdma.c
> @@ -605,17 +605,14 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> {
> struct xilinx_vdma_config *config = &chan->config;
> struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
> - unsigned long flags;
> u32 reg;
> struct xilinx_vdma_tx_segment *tail_segment;
>
> if (chan->err)
> return;
>
> - spin_lock_irqsave(&chan->lock, flags);
> -
It would help if you add a comment to this function taht we need to invoke
this with lock held...
> if (list_empty(&chan->pending_list))
> - goto out_unlock;
> + return;
>
> desc = list_first_entry(&chan->pending_list,
> struct xilinx_vdma_tx_descriptor, node);
> @@ -629,7 +626,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> if (chan->has_sg && xilinx_vdma_is_running(chan) &&
> !xilinx_vdma_is_idle(chan)) {
> dev_dbg(chan->dev, "DMA controller still busy\n");
> - goto out_unlock;
> + return;
> }
>
> /*
> @@ -676,7 +673,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> xilinx_vdma_start(chan);
>
> if (chan->err)
> - goto out_unlock;
> + return;
>
> /* Start the transfer */
> if (chan->has_sg) {
> @@ -696,7 +693,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> }
>
> if (!last)
> - goto out_unlock;
> + return;
>
> /* HW expects these parameters to be same for one transaction */
> vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
> @@ -707,9 +704,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
>
> list_splice_tail_init(&chan->pending_list, &chan->active_list);
> chan->desc_pendingcount = 0;
> -
> -out_unlock:
> - spin_unlock_irqrestore(&chan->lock, flags);
> }
>
> /**
> @@ -719,8 +713,11 @@ out_unlock:
> static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
> {
> struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
> + unsigned long flags;
>
> + spin_lock_irqsave(&chan->lock, flags);
> xilinx_vdma_start_transfer(chan);
> + spin_unlock_irqrestore(&chan->lock, flags);
> }
>
> /**
> @@ -732,21 +729,15 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
> static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
> {
> struct xilinx_vdma_tx_descriptor *desc, *next;
> - unsigned long flags;
> -
> - spin_lock_irqsave(&chan->lock, flags);
this one as well
>
> if (list_empty(&chan->active_list))
> - goto out_unlock;
> + return;
>
> list_for_each_entry_safe(desc, next, &chan->active_list, node) {
> list_del(&desc->node);
> dma_cookie_complete(&desc->async_tx);
> list_add_tail(&desc->node, &chan->done_list);
> }
> -
> -out_unlock:
> - spin_unlock_irqrestore(&chan->lock, flags);
> }
>
> /**
> @@ -857,8 +848,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
> }
>
> if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
> + spin_lock(&chan->lock);
> xilinx_vdma_complete_descriptor(chan);
> xilinx_vdma_start_transfer(chan);
> + spin_unlock(&chan->lock);
> }
>
> tasklet_schedule(&chan->tasklet);
> --
> 2.1.2
>
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
--
~Vinod
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
2016-02-23 3:09 ` Vinod Koul
@ 2016-02-23 4:19 ` Appana Durga Kedareswara Rao
0 siblings, 0 replies; 8+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-02-23 4:19 UTC (permalink / raw)
To: Vinod Koul
Cc: dan.j.williams, Michal Simek, Soren Brinkmann, moritz.fischer,
laurent.pinchart, luis, Anirudha Sarangi, dmaengine,
linux-arm-kernel, linux-kernel
Hi Vinod,
> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: Tuesday, February 23, 2016 8:40 AM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams@intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer@ettus.com;
> laurent.pinchart@ideasonboard.com; luis@debethencourt.com; Anirudha
> Sarangi; dmaengine@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling
>
> On Mon, Feb 22, 2016 at 11:24:35AM +0530, Kedareswara rao Appana wrote:
> > This patch simplifies the spin lock handling in the driver.
>
> But sadly doesn't describe how?
Ok sure will improve commit message in the next version.
>
> >
> > Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
> > ---
> > Changes for v2:
> > ---> splitted the changes into multiple patches.
> >
> > drivers/dma/xilinx/xilinx_vdma.c | 27 ++++++++++-----------------
> > 1 file changed, 10 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/dma/xilinx/xilinx_vdma.c
> > b/drivers/dma/xilinx/xilinx_vdma.c
> > index 06bffec..d646218 100644
> > --- a/drivers/dma/xilinx/xilinx_vdma.c
> > +++ b/drivers/dma/xilinx/xilinx_vdma.c
> > @@ -605,17 +605,14 @@ static void xilinx_vdma_start_transfer(struct
> > xilinx_vdma_chan *chan) {
> > struct xilinx_vdma_config *config = &chan->config;
> > struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
> > - unsigned long flags;
> > u32 reg;
> > struct xilinx_vdma_tx_segment *tail_segment;
> >
> > if (chan->err)
> > return;
> >
> > - spin_lock_irqsave(&chan->lock, flags);
> > -
>
> It would help if you add a comment to this function taht we need to invoke this
> with lock held...
Ok sure will add comment..
>
> > if (list_empty(&chan->pending_list))
> > - goto out_unlock;
> > + return;
> >
> > desc = list_first_entry(&chan->pending_list,
> > struct xilinx_vdma_tx_descriptor, node); @@ -
> 629,7 +626,7 @@
> > static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> > if (chan->has_sg && xilinx_vdma_is_running(chan) &&
> > !xilinx_vdma_is_idle(chan)) {
> > dev_dbg(chan->dev, "DMA controller still busy\n");
> > - goto out_unlock;
> > + return;
> > }
> >
> > /*
> > @@ -676,7 +673,7 @@ static void xilinx_vdma_start_transfer(struct
> xilinx_vdma_chan *chan)
> > xilinx_vdma_start(chan);
> >
> > if (chan->err)
> > - goto out_unlock;
> > + return;
> >
> > /* Start the transfer */
> > if (chan->has_sg) {
> > @@ -696,7 +693,7 @@ static void xilinx_vdma_start_transfer(struct
> xilinx_vdma_chan *chan)
> > }
> >
> > if (!last)
> > - goto out_unlock;
> > + return;
> >
> > /* HW expects these parameters to be same for one
> transaction */
> > vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last-
> >hw.hsize); @@
> > -707,9 +704,6 @@ static void xilinx_vdma_start_transfer(struct
> > xilinx_vdma_chan *chan)
> >
> > list_splice_tail_init(&chan->pending_list, &chan->active_list);
> > chan->desc_pendingcount = 0;
> > -
> > -out_unlock:
> > - spin_unlock_irqrestore(&chan->lock, flags);
> > }
> >
> > /**
> > @@ -719,8 +713,11 @@ out_unlock:
> > static void xilinx_vdma_issue_pending(struct dma_chan *dchan) {
> > struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
> > + unsigned long flags;
> >
> > + spin_lock_irqsave(&chan->lock, flags);
> > xilinx_vdma_start_transfer(chan);
> > + spin_unlock_irqrestore(&chan->lock, flags);
> > }
> >
> > /**
> > @@ -732,21 +729,15 @@ static void xilinx_vdma_issue_pending(struct
> > dma_chan *dchan) static void xilinx_vdma_complete_descriptor(struct
> > xilinx_vdma_chan *chan) {
> > struct xilinx_vdma_tx_descriptor *desc, *next;
> > - unsigned long flags;
> > -
> > - spin_lock_irqsave(&chan->lock, flags);
>
> this one as well
Ok sure will add comment..
Regards,
Kedar.
>
> >
> > if (list_empty(&chan->active_list))
> > - goto out_unlock;
> > + return;
> >
> > list_for_each_entry_safe(desc, next, &chan->active_list, node) {
> > list_del(&desc->node);
> > dma_cookie_complete(&desc->async_tx);
> > list_add_tail(&desc->node, &chan->done_list);
> > }
> > -
> > -out_unlock:
> > - spin_unlock_irqrestore(&chan->lock, flags);
> > }
> >
> > /**
> > @@ -857,8 +848,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq,
> void *data)
> > }
> >
> > if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
> > + spin_lock(&chan->lock);
> > xilinx_vdma_complete_descriptor(chan);
> > xilinx_vdma_start_transfer(chan);
> > + spin_unlock(&chan->lock);
> > }
> >
> > tasklet_schedule(&chan->tasklet);
> > --
> > 2.1.2
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe dmaengine"
> > in the body of a message to majordomo@vger.kernel.org More majordomo
> > info at http://vger.kernel.org/majordomo-info.html
>
> --
> ~Vinod
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v2 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode
2016-02-22 5:54 [PATCH v2 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
@ 2016-02-22 5:54 ` Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking Kedareswara rao Appana
2 siblings, 0 replies; 8+ messages in thread
From: Kedareswara rao Appana @ 2016-02-22 5:54 UTC (permalink / raw)
To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
appanad, moritz.fischer, laurent.pinchart, luis, anirudh
Cc: dmaengine, linux-arm-kernel, linux-kernel
This patch fixes issues with the non-parking mode (circular mode).
With the existing driver in circular mode, if we submit fewer frames than the
h/w is configured for, we simply end up with misconfigured VDMA h/w.
This patch fixes this issue by configuring the frame count register.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
---> split the changes into multiple patches.
drivers/dma/xilinx/xilinx_vdma.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index d646218..8db07f7 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -645,6 +645,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
else
reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+ /* Configure channel to allow number frame buffers */
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+ chan->desc_pendingcount);
+
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
--
2.1.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking
2016-02-22 5:54 [PATCH v2 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode Kedareswara rao Appana
@ 2016-02-22 5:54 ` Kedareswara rao Appana
2016-02-23 3:11 ` Vinod Koul
2 siblings, 1 reply; 8+ messages in thread
From: Kedareswara rao Appana @ 2016-02-22 5:54 UTC (permalink / raw)
To: dan.j.williams, vinod.koul, michal.simek, soren.brinkmann,
appanad, moritz.fischer, laurent.pinchart, luis, anirudh
Cc: dmaengine, linux-arm-kernel, linux-kernel
This patch improves the channel idle checking by introducing
a new variable in the channel's private structure.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
---
Changes for v2:
---> split the changes into multiple patches.
drivers/dma/xilinx/xilinx_vdma.c | 41 ++++++++--------------------------------
1 file changed, 8 insertions(+), 33 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 8db07f7..51686d1 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -202,6 +202,7 @@ struct xilinx_vdma_tx_descriptor {
* @has_sg: Support scatter transfers
* @genlock: Support genlock mode
* @err: Channel has errors
+ * @idle: Check for channel idle
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -225,6 +226,7 @@ struct xilinx_vdma_chan {
bool has_sg;
bool genlock;
bool err;
+ bool idle;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -519,32 +521,6 @@ static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
}
/**
- * xilinx_vdma_is_running - Check if VDMA channel is running
- * @chan: Driver specific VDMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
-{
- return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED) &&
- (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_vdma_is_idle - Check if VDMA channel is idle
- * @chan: Driver specific VDMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
-{
- return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_IDLE;
-}
-
-/**
* xilinx_vdma_halt - Halt VDMA channel
* @chan: Driver specific VDMA channel
*/
@@ -614,6 +590,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
if (list_empty(&chan->pending_list))
return;
+ if (!chan->idle)
+ return;
+
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
tail_desc = list_last_entry(&chan->pending_list,
@@ -622,13 +601,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_vdma_is_running(chan) &&
- !xilinx_vdma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
-
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
@@ -708,6 +680,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -854,6 +827,7 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
xilinx_vdma_complete_descriptor(chan);
+ chan->idle = true;
xilinx_vdma_start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -1212,6 +1186,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
list_add_tail(&chan->common.device_node, &xdev->common.channels);
xdev->chan[chan->id] = chan;
+ chan->idle = true;
/* Reset the channel */
err = xilinx_vdma_chan_reset(chan);
--
2.1.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking
2016-02-22 5:54 ` [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking Kedareswara rao Appana
@ 2016-02-23 3:11 ` Vinod Koul
2016-02-23 4:21 ` Appana Durga Kedareswara Rao
0 siblings, 1 reply; 8+ messages in thread
From: Vinod Koul @ 2016-02-23 3:11 UTC (permalink / raw)
To: Kedareswara rao Appana
Cc: dan.j.williams, michal.simek, soren.brinkmann, appanad,
moritz.fischer, laurent.pinchart, luis, anirudh, dmaengine,
linux-arm-kernel, linux-kernel
On Mon, Feb 22, 2016 at 11:24:37AM +0530, Kedareswara rao Appana wrote:
> This patch improves the channel idle cheking by introduing
^^^^^^^^^
typo
> a new varibale in chan private structure.
^^^^^^^
here too :(
and there is no description how this improvement is achieved and why
>
> Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
> ---
> Changes for v2:
> ---> splitted the changes into multiple patches.
>
> drivers/dma/xilinx/xilinx_vdma.c | 41 ++++++++--------------------------------
> 1 file changed, 8 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
> index 8db07f7..51686d1 100644
> --- a/drivers/dma/xilinx/xilinx_vdma.c
> +++ b/drivers/dma/xilinx/xilinx_vdma.c
> @@ -202,6 +202,7 @@ struct xilinx_vdma_tx_descriptor {
> * @has_sg: Support scatter transfers
> * @genlock: Support genlock mode
> * @err: Channel has errors
> + * @idle: Check for channel idle
> * @tasklet: Cleanup work after irq
> * @config: Device configuration info
> * @flush_on_fsync: Flush on Frame sync
> @@ -225,6 +226,7 @@ struct xilinx_vdma_chan {
> bool has_sg;
> bool genlock;
> bool err;
> + bool idle;
> struct tasklet_struct tasklet;
> struct xilinx_vdma_config config;
> bool flush_on_fsync;
> @@ -519,32 +521,6 @@ static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
> }
>
> /**
> - * xilinx_vdma_is_running - Check if VDMA channel is running
> - * @chan: Driver specific VDMA channel
> - *
> - * Return: '1' if running, '0' if not.
> - */
> -static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
> -{
> - return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> - XILINX_VDMA_DMASR_HALTED) &&
> - (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> - XILINX_VDMA_DMACR_RUNSTOP);
> -}
> -
> -/**
> - * xilinx_vdma_is_idle - Check if VDMA channel is idle
> - * @chan: Driver specific VDMA channel
> - *
> - * Return: '1' if idle, '0' if not.
> - */
> -static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
> -{
> - return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> - XILINX_VDMA_DMASR_IDLE;
> -}
> -
> -/**
> * xilinx_vdma_halt - Halt VDMA channel
> * @chan: Driver specific VDMA channel
> */
> @@ -614,6 +590,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> if (list_empty(&chan->pending_list))
> return;
>
> + if (!chan->idle)
> + return;
> +
> desc = list_first_entry(&chan->pending_list,
> struct xilinx_vdma_tx_descriptor, node);
> tail_desc = list_last_entry(&chan->pending_list,
> @@ -622,13 +601,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
> tail_segment = list_last_entry(&tail_desc->segments,
> struct xilinx_vdma_tx_segment, node);
>
> - /* If it is SG mode and hardware is busy, cannot submit */
> - if (chan->has_sg && xilinx_vdma_is_running(chan) &&
> - !xilinx_vdma_is_idle(chan)) {
> - dev_dbg(chan->dev, "DMA controller still busy\n");
> - return;
> - }
> -
> /*
> * If hardware is idle, then all descriptors on the running lists are
> * done, start new transfers
> @@ -708,6 +680,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
>
> list_splice_tail_init(&chan->pending_list, &chan->active_list);
> chan->desc_pendingcount = 0;
> + chan->idle = false;
> }
>
> /**
> @@ -854,6 +827,7 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
> if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
> spin_lock(&chan->lock);
> xilinx_vdma_complete_descriptor(chan);
> + chan->idle = true;
> xilinx_vdma_start_transfer(chan);
> spin_unlock(&chan->lock);
> }
> @@ -1212,6 +1186,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
>
> list_add_tail(&chan->common.device_node, &xdev->common.channels);
> xdev->chan[chan->id] = chan;
> + chan->idle = true;
>
> /* Reset the channel */
> err = xilinx_vdma_chan_reset(chan);
> --
> 2.1.2
>
--
~Vinod
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking
2016-02-23 3:11 ` Vinod Koul
@ 2016-02-23 4:21 ` Appana Durga Kedareswara Rao
0 siblings, 0 replies; 8+ messages in thread
From: Appana Durga Kedareswara Rao @ 2016-02-23 4:21 UTC (permalink / raw)
To: Vinod Koul
Cc: dan.j.williams, Michal Simek, Soren Brinkmann, moritz.fischer,
laurent.pinchart, luis, Anirudha Sarangi, dmaengine,
linux-arm-kernel, linux-kernel
Hi Vinod,
> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@intel.com]
> Sent: Tuesday, February 23, 2016 8:42 AM
> To: Appana Durga Kedareswara Rao
> Cc: dan.j.williams@intel.com; Michal Simek; Soren Brinkmann; Appana Durga
> Kedareswara Rao; moritz.fischer@ettus.com;
> laurent.pinchart@ideasonboard.com; luis@debethencourt.com; Anirudha
> Sarangi; dmaengine@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle
> checking
>
> On Mon, Feb 22, 2016 at 11:24:37AM +0530, Kedareswara rao Appana wrote:
> > This patch improves the channel idle cheking by introduing
> ^^^^^^^^^
> typo
Ok will fix.
>
> > a new varibale in chan private structure.
> ^^^^^^^
> here too :(
Ok will fix.
>
> and there is no description how this improvement is achieved and why
Ok will give detailed explanation in the next version of the patch.
Regards,
Kedar.
> >
> > Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
> > ---
> > Changes for v2:
> > ---> splitted the changes into multiple patches.
> >
> > drivers/dma/xilinx/xilinx_vdma.c | 41
> > ++++++++--------------------------------
> > 1 file changed, 8 insertions(+), 33 deletions(-)
> >
> > diff --git a/drivers/dma/xilinx/xilinx_vdma.c
> > b/drivers/dma/xilinx/xilinx_vdma.c
> > index 8db07f7..51686d1 100644
> > --- a/drivers/dma/xilinx/xilinx_vdma.c
> > +++ b/drivers/dma/xilinx/xilinx_vdma.c
> > @@ -202,6 +202,7 @@ struct xilinx_vdma_tx_descriptor {
> > * @has_sg: Support scatter transfers
> > * @genlock: Support genlock mode
> > * @err: Channel has errors
> > + * @idle: Check for channel idle
> > * @tasklet: Cleanup work after irq
> > * @config: Device configuration info
> > * @flush_on_fsync: Flush on Frame sync @@ -225,6 +226,7 @@ struct
> > xilinx_vdma_chan {
> > bool has_sg;
> > bool genlock;
> > bool err;
> > + bool idle;
> > struct tasklet_struct tasklet;
> > struct xilinx_vdma_config config;
> > bool flush_on_fsync;
> > @@ -519,32 +521,6 @@ static enum dma_status
> > xilinx_vdma_tx_status(struct dma_chan *dchan, }
> >
> > /**
> > - * xilinx_vdma_is_running - Check if VDMA channel is running
> > - * @chan: Driver specific VDMA channel
> > - *
> > - * Return: '1' if running, '0' if not.
> > - */
> > -static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) -{
> > - return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > - XILINX_VDMA_DMASR_HALTED) &&
> > - (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
> > - XILINX_VDMA_DMACR_RUNSTOP);
> > -}
> > -
> > -/**
> > - * xilinx_vdma_is_idle - Check if VDMA channel is idle
> > - * @chan: Driver specific VDMA channel
> > - *
> > - * Return: '1' if idle, '0' if not.
> > - */
> > -static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) -{
> > - return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
> > - XILINX_VDMA_DMASR_IDLE;
> > -}
> > -
> > -/**
> > * xilinx_vdma_halt - Halt VDMA channel
> > * @chan: Driver specific VDMA channel
> > */
> > @@ -614,6 +590,9 @@ static void xilinx_vdma_start_transfer(struct
> xilinx_vdma_chan *chan)
> > if (list_empty(&chan->pending_list))
> > return;
> >
> > + if (!chan->idle)
> > + return;
> > +
> > desc = list_first_entry(&chan->pending_list,
> > struct xilinx_vdma_tx_descriptor, node);
> > tail_desc = list_last_entry(&chan->pending_list,
> > @@ -622,13 +601,6 @@ static void xilinx_vdma_start_transfer(struct
> xilinx_vdma_chan *chan)
> > tail_segment = list_last_entry(&tail_desc->segments,
> > struct xilinx_vdma_tx_segment, node);
> >
> > - /* If it is SG mode and hardware is busy, cannot submit */
> > - if (chan->has_sg && xilinx_vdma_is_running(chan) &&
> > - !xilinx_vdma_is_idle(chan)) {
> > - dev_dbg(chan->dev, "DMA controller still busy\n");
> > - return;
> > - }
> > -
> > /*
> > * If hardware is idle, then all descriptors on the running lists are
> > * done, start new transfers
> > @@ -708,6 +680,7 @@ static void xilinx_vdma_start_transfer(struct
> > xilinx_vdma_chan *chan)
> >
> > list_splice_tail_init(&chan->pending_list, &chan->active_list);
> > chan->desc_pendingcount = 0;
> > + chan->idle = false;
> > }
> >
> > /**
> > @@ -854,6 +827,7 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void
> *data)
> > if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
> > spin_lock(&chan->lock);
> > xilinx_vdma_complete_descriptor(chan);
> > + chan->idle = true;
> > xilinx_vdma_start_transfer(chan);
> > spin_unlock(&chan->lock);
> > }
> > @@ -1212,6 +1186,7 @@ static int xilinx_vdma_chan_probe(struct
> > xilinx_vdma_device *xdev,
> >
> > list_add_tail(&chan->common.device_node, &xdev-
> >common.channels);
> > xdev->chan[chan->id] = chan;
> > + chan->idle = true;
> >
> > /* Reset the channel */
> > err = xilinx_vdma_chan_reset(chan);
> > --
> > 2.1.2
> >
>
> --
> ~Vinod
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2016-02-23 4:21 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-22 5:54 [PATCH v2 1/4] dmaengine: xilinx_vdma: Improve SG engine handling Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 2/4] dmaengine: xilinx_vdma: Simplify spin lock handling Kedareswara rao Appana
2016-02-23 3:09 ` Vinod Koul
2016-02-23 4:19 ` Appana Durga Kedareswara Rao
2016-02-22 5:54 ` [PATCH v2 3/4] dmaengine: xilinx_vdma: Fix issues with non-parking mode Kedareswara rao Appana
2016-02-22 5:54 ` [PATCH v2 4/4] dmaengine: xilinx_vdma: Improve channel idle checking Kedareswara rao Appana
2016-02-23 3:11 ` Vinod Koul
2016-02-23 4:21 ` Appana Durga Kedareswara Rao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).