* Re: [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants
2011-06-28 11:17 [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
@ 2011-06-28 10:15 ` Nicolas Ferre
2011-06-28 11:17 ` [PATCH 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
2011-06-28 11:17 ` [PATCH 3/3] dmaengine: at_hdmac: add slave config operation Nicolas Ferre
2 siblings, 0 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-06-28 10:15 UTC (permalink / raw)
To: vinod.koul
Cc: dan.j.williams, linux-arm-kernel, linux-kernel, Uwe Kleine-König
Vinod,
FYI, this series is based on your slave-dma.git/next branch.
Best regards,
--
Nicolas Ferre
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants
@ 2011-06-28 11:17 Nicolas Ferre
2011-06-28 10:15 ` Nicolas Ferre
` (2 more replies)
0 siblings, 3 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-06-28 11:17 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel
Cc: linux-kernel, Nicolas Ferre, Uwe Kleine-König
dmaengine routines can be called from interrupt context and with
interrupts disabled, whereas spin_unlock_bh() cannot be called from
such contexts. This patch therefore converts all spin_lock* routines
to their irqsave variants.
Also, the spin_lock() used in the tasklet is converted to the irqsave
variant: the tasklet can be interrupted, and DMA requests issued from
such interrupt handlers may try to take the same lock.
Idea from dw_dmac patch by Viresh Kumar.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
---
drivers/dma/at_hdmac.c | 52 +++++++++++++++++++++++++++--------------------
1 files changed, 30 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6a483ea..fd87b96 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
struct at_desc *ret = NULL;
+ unsigned long flags;
unsigned int i = 0;
LIST_HEAD(tmp_list);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i);
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
if (desc) {
struct at_desc *child;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
}
@@ -471,8 +473,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+ unsigned long flags;
- spin_lock(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
@@ -480,7 +483,7 @@ static void atc_tasklet(unsigned long data)
else
atc_advance_work(atchan);
- spin_unlock(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return cookie;
}
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
+ unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) {
if (!test_bit(ATC_IS_PAUSED, &atchan->status))
return 0;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc;
/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
return -ENXIO;
}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
+ unsigned long flags;
enum dma_status ret;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used,
@@ -1046,6 +1052,7 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
@@ -1053,11 +1060,11 @@ static void atc_issue_pending(struct dma_chan *chan)
if (test_bit(ATC_IS_CYCLIC, &atchan->status))
return;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
+ unsigned long flags;
int i;
u32 cfg;
LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
--
1.7.3
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/3] dmaengine: at_hdmac: improve power management routines
2011-06-28 11:17 [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
2011-06-28 10:15 ` Nicolas Ferre
@ 2011-06-28 11:17 ` Nicolas Ferre
2011-07-07 2:20 ` Vinod Koul
2011-06-28 11:17 ` [PATCH 3/3] dmaengine: at_hdmac: add slave config operation Nicolas Ferre
2 siblings, 1 reply; 13+ messages in thread
From: Nicolas Ferre @ 2011-06-28 11:17 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel
Cc: linux-kernel, Nicolas Ferre, Uwe Kleine-König
Save/restore dma controller state across a suspend-resume sequence.
The prepare() function will wait for the non-cyclic channels to become idle.
It also deals with cyclic operations with the start at next period while
resuming.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
---
drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
drivers/dma/at_hdmac_regs.h | 7 +++
2 files changed, 94 insertions(+), 1 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fd87b96..7096adb 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk);
}
+static int at_dma_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ /* wait for transaction completion (except in cyclic case) */
+ if (atc_chan_is_enabled(atchan) &&
+ !test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+ struct dma_chan *chan = &atchan->chan_common;
+
+ /* Channel should be paused by user
+ * do it anyway even if it is not done already */
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status)) {
+ dev_warn(chan2dev(chan),
+ "cyclic channel not paused, should be done by channel user\n");
+ atc_control(chan, DMA_PAUSE, 0);
+ }
+
+ /* now preserve additional data for cyclic operations */
+ /* next descriptor address in the cyclic list */
+ atchan->save_dscr = channel_readl(atchan, DSCR);
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
- at_dma_off(platform_get_drvdata(pdev));
+ /* preserve data */
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_suspend_cyclic(atchan);
+ atchan->save_cfg = channel_readl(atchan, CFG);
+ }
+ atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+ /* disable DMA controller */
+ at_dma_off(atdma);
clk_disable(atdma->clk);
return 0;
}
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+ /* restore channel status for cyclic descriptors list:
+ * next descriptor in the cyclic list at the time of suspend */
+ channel_writel(atchan, SADDR, 0);
+ channel_writel(atchan, DADDR, 0);
+ channel_writel(atchan, CTRLA, 0);
+ channel_writel(atchan, CTRLB, 0);
+ channel_writel(atchan, DSCR, atchan->save_dscr);
+ dma_writel(atdma, CHER, atchan->mask);
+
+ /* channel pause status should be removed by channel user
+ * We cannot take the initiative to do it here */
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+ /* bring back DMA controller */
clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+ /* clear any pending interrupt */
+ while (dma_readl(atdma, EBCISR))
+ cpu_relax();
+
+ /* restore saved data */
+ dma_writel(atdma, EBCIER, atdma->save_imr);
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ channel_writel(atchan, CFG, atchan->save_cfg);
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_resume_cyclic(atchan);
+ }
return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
+ .prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1..6f0c4a3 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ * the cyclic list on suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask;
unsigned long status;
struct tasklet_struct tasklet;
+ u32 save_cfg;
+ u32 save_dscr;
spinlock_t lock;
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base
* @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels availlable in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common;
void __iomem *regs;
struct clk *clk;
+ u32 save_imr;
u8 all_chan_mask;
--
1.7.3
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 3/3] dmaengine: at_hdmac: add slave config operation
2011-06-28 11:17 [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
2011-06-28 10:15 ` Nicolas Ferre
2011-06-28 11:17 ` [PATCH 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
@ 2011-06-28 11:17 ` Nicolas Ferre
2011-07-07 2:33 ` Vinod Koul
2 siblings, 1 reply; 13+ messages in thread
From: Nicolas Ferre @ 2011-06-28 11:17 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel
Cc: linux-kernel, Nicolas Ferre, Uwe Kleine-König
Only change source/destination peripheral register access width.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
---
drivers/dma/at_hdmac.c | 29 +++++++++++++++++++++++++++++
1 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7096adb..9cb8044 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -954,6 +954,35 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
clear_bit(ATC_IS_PAUSED, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
+ } else if (cmd == DMA_SLAVE_CONFIG) {
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+ struct at_dma_slave *atslave = chan->private;
+ enum dma_slave_buswidth sdma_width;
+ enum at_dma_slave_width atdma_width;
+
+ /* only modify transfer size width */
+ if (!atslave)
+ return -ENXIO;
+
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE)
+ sdma_width = dmaengine_cfg->src_addr_width;
+ else
+ sdma_width = dmaengine_cfg->dst_addr_width;
+
+ switch (sdma_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ atdma_width = AT_DMA_SLAVE_WIDTH_8BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ atdma_width = AT_DMA_SLAVE_WIDTH_16BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ atdma_width = AT_DMA_SLAVE_WIDTH_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ atslave->reg_width = atdma_width;
} else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc;
/*
--
1.7.3
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH 2/3] dmaengine: at_hdmac: improve power management routines
2011-06-28 11:17 ` [PATCH 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
@ 2011-07-07 2:20 ` Vinod Koul
2011-07-25 21:06 ` Nicolas Ferre
0 siblings, 1 reply; 13+ messages in thread
From: Vinod Koul @ 2011-07-07 2:20 UTC (permalink / raw)
To: Nicolas Ferre
Cc: dan.j.williams, linux-arm-kernel, linux-kernel, Uwe Kleine-König
On Tue, 2011-06-28 at 13:17 +0200, Nicolas Ferre wrote:
> Save/restore dma controller state across a suspend-resume sequence.
> The prepare() function will wait for the non-cyclic channels to become idle.
> It also deals with cyclic operations with the start at next period while
> resuming.
>
> Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
> Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
> ---
> drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
> drivers/dma/at_hdmac_regs.h | 7 +++
> 2 files changed, 94 insertions(+), 1 deletions(-)
>
> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
> index fd87b96..7096adb 100644
> --- a/drivers/dma/at_hdmac.c
> +++ b/drivers/dma/at_hdmac.c
> @@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev)
> clk_disable(atdma->clk);
> }
>
> +static int at_dma_prepare(struct device *dev)
> +{
> + struct platform_device *pdev = to_platform_device(dev);
> + struct at_dma *atdma = platform_get_drvdata(pdev);
> + struct dma_chan *chan, *_chan;
> +
> + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
> + device_node) {
> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> + /* wait for transaction completion (except in cyclic case) */
> + if (atc_chan_is_enabled(atchan) &&
> + !test_bit(ATC_IS_CYCLIC, &atchan->status))
> + return -EAGAIN;
pls fix indent here
> + }
> + return 0;
> +}
> +
> +static void atc_suspend_cyclic(struct at_dma_chan *atchan)
> +{
> + struct dma_chan *chan = &atchan->chan_common;
> +
> + /* Channel should be paused by user
> + * do it anyway even if it is not done already */
> + if (!test_bit(ATC_IS_PAUSED, &atchan->status)) {
> + dev_warn(chan2dev(chan),
> + "cyclic channel not paused, should be done by channel user\n");
> + atc_control(chan, DMA_PAUSE, 0);
> + }
> +
> + /* now preserve additional data for cyclic operations */
> + /* next descriptor address in the cyclic list */
> + atchan->save_dscr = channel_readl(atchan, DSCR);
> +
> + vdbg_dump_regs(atchan);
> +}
> +
> static int at_dma_suspend_noirq(struct device *dev)
> {
> struct platform_device *pdev = to_platform_device(dev);
> struct at_dma *atdma = platform_get_drvdata(pdev);
> + struct dma_chan *chan, *_chan;
>
> - at_dma_off(platform_get_drvdata(pdev));
> + /* preserve data */
> + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
> + device_node) {
> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> +
> + if (test_bit(ATC_IS_CYCLIC, &atchan->status))
> + atc_suspend_cyclic(atchan);
> + atchan->save_cfg = channel_readl(atchan, CFG);
> + }
> + atdma->save_imr = dma_readl(atdma, EBCIMR);
> +
> + /* disable DMA controller */
> + at_dma_off(atdma);
> clk_disable(atdma->clk);
> return 0;
> }
>
> +static void atc_resume_cyclic(struct at_dma_chan *atchan)
> +{
> + struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
> +
> + /* restore channel status for cyclic descriptors list:
> + * next descriptor in the cyclic list at the time of suspend */
> + channel_writel(atchan, SADDR, 0);
> + channel_writel(atchan, DADDR, 0);
> + channel_writel(atchan, CTRLA, 0);
> + channel_writel(atchan, CTRLB, 0);
> + channel_writel(atchan, DSCR, atchan->save_dscr);
> + dma_writel(atdma, CHER, atchan->mask);
> +
> + /* channel pause status should be removed by channel user
> + * We cannot take the initiative to do it here */
> +
> + vdbg_dump_regs(atchan);
> +}
> +
> static int at_dma_resume_noirq(struct device *dev)
> {
> struct platform_device *pdev = to_platform_device(dev);
> struct at_dma *atdma = platform_get_drvdata(pdev);
> + struct dma_chan *chan, *_chan;
>
> + /* bring back DMA controller */
> clk_enable(atdma->clk);
> dma_writel(atdma, EN, AT_DMA_ENABLE);
> +
> + /* clear any pending interrupt */
> + while (dma_readl(atdma, EBCISR))
> + cpu_relax();
> +
> + /* restore saved data */
> + dma_writel(atdma, EBCIER, atdma->save_imr);
> + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
> + device_node) {
> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> +
> + channel_writel(atchan, CFG, atchan->save_cfg);
> + if (test_bit(ATC_IS_CYCLIC, &atchan->status))
> + atc_resume_cyclic(atchan);
This testing on bits seems to be reused few times how about wrapping it
up in a routine?
> + }
> return 0;
> }
>
> static const struct dev_pm_ops at_dma_dev_pm_ops = {
> + .prepare = at_dma_prepare,
> .suspend_noirq = at_dma_suspend_noirq,
> .resume_noirq = at_dma_resume_noirq,
> };
> diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
> index 087dbf1..6f0c4a3 100644
> --- a/drivers/dma/at_hdmac_regs.h
> +++ b/drivers/dma/at_hdmac_regs.h
> @@ -204,6 +204,9 @@ enum atc_status {
> * @status: transmit status information from irq/prep* functions
> * to tasklet (use atomic operations)
> * @tasklet: bottom half to finish transaction work
> + * @save_cfg: configuration register that is saved on suspend/resume cycle
> + * @save_dscr: for cyclic operations, preserve next descriptor address in
> + * the cyclic list on suspend/resume cycle
> * @lock: serializes enqueue/dequeue operations to descriptors lists
> * @completed_cookie: identifier for the most recently completed operation
> * @active_list: list of descriptors dmaengine is being running on
> @@ -218,6 +221,8 @@ struct at_dma_chan {
> u8 mask;
> unsigned long status;
> struct tasklet_struct tasklet;
> + u32 save_cfg;
> + u32 save_dscr;
>
> spinlock_t lock;
>
> @@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
> * @chan_common: common dmaengine dma_device object members
> * @ch_regs: memory mapped register base
> * @clk: dma controller clock
> + * @save_imr: interrupt mask register that is saved on suspend/resume cycle
> * @all_chan_mask: all channels availlable in a mask
> * @dma_desc_pool: base of DMA descriptor region (DMA address)
> * @chan: channels table to store at_dma_chan structures
> @@ -256,6 +262,7 @@ struct at_dma {
> struct dma_device dma_common;
> void __iomem *regs;
> struct clk *clk;
> + u32 save_imr;
>
> u8 all_chan_mask;
>
--
~Vinod Koul
Intel Corp.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 3/3] dmaengine: at_hdmac: add slave config operation
2011-06-28 11:17 ` [PATCH 3/3] dmaengine: at_hdmac: add slave config operation Nicolas Ferre
@ 2011-07-07 2:33 ` Vinod Koul
2011-07-25 21:12 ` Nicolas Ferre
0 siblings, 1 reply; 13+ messages in thread
From: Vinod Koul @ 2011-07-07 2:33 UTC (permalink / raw)
To: Nicolas Ferre
Cc: dan.j.williams, linux-arm-kernel, linux-kernel, Uwe Kleine-König
On Tue, 2011-06-28 at 13:17 +0200, Nicolas Ferre wrote:
> Only change source/destination peripheral register access width.
How about a more detailed, self-explanatory changelog?
>
> Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
> Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
> ---
> drivers/dma/at_hdmac.c | 29 +++++++++++++++++++++++++++++
> 1 files changed, 29 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
> index 7096adb..9cb8044 100644
> --- a/drivers/dma/at_hdmac.c
> +++ b/drivers/dma/at_hdmac.c
> @@ -954,6 +954,35 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> clear_bit(ATC_IS_PAUSED, &atchan->status);
>
> spin_unlock_irqrestore(&atchan->lock, flags);
> + } else if (cmd == DMA_SLAVE_CONFIG) {
> + struct dma_slave_config *dmaengine_cfg = (void *)arg;
> + struct at_dma_slave *atslave = chan->private;
This API was created to remove chan->private, so this doesn't make sense
at all. If your intent is to have a channels internal data, then you
should consider embedding dma_chan into this structure (the way other
dma drivers do) rather than using private field.
> + enum dma_slave_buswidth sdma_width;
> + enum at_dma_slave_width atdma_width;
> +
> + /* only modify transfer size width */
> + if (!atslave)
> + return -ENXIO;
> +
> + if (dmaengine_cfg->direction == DMA_FROM_DEVICE)
> + sdma_width = dmaengine_cfg->src_addr_width;
> + else
> + sdma_width = dmaengine_cfg->dst_addr_width;
> +
> + switch (sdma_width) {
> + case DMA_SLAVE_BUSWIDTH_1_BYTE:
> + atdma_width = AT_DMA_SLAVE_WIDTH_8BIT;
> + break;
> + case DMA_SLAVE_BUSWIDTH_2_BYTES:
> + atdma_width = AT_DMA_SLAVE_WIDTH_16BIT;
> + break;
> + case DMA_SLAVE_BUSWIDTH_4_BYTES:
> + atdma_width = AT_DMA_SLAVE_WIDTH_32BIT;
> + break;
> + default:
> + return -EINVAL;
> + }
> + atslave->reg_width = atdma_width;
> } else if (cmd == DMA_TERMINATE_ALL) {
> struct at_desc *desc, *_desc;
> /*
--
~Vinod Koul
Intel Corp.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/3] dmaengine: at_hdmac: improve power management routines
2011-07-07 2:20 ` Vinod Koul
@ 2011-07-25 21:06 ` Nicolas Ferre
2011-07-26 10:51 ` Vinod Koul
0 siblings, 1 reply; 13+ messages in thread
From: Nicolas Ferre @ 2011-07-25 21:06 UTC (permalink / raw)
To: Vinod Koul
Cc: dan.j.williams, linux-arm-kernel, linux-kernel, Uwe Kleine-König
On 07/07/2011 04:20 AM, Vinod Koul wrote:
> On Tue, 2011-06-28 at 13:17 +0200, Nicolas Ferre wrote:
>> Save/restore dma controller state across a suspend-resume sequence.
>> The prepare() function will wait for the non-cyclic channels to become idle.
>> It also deals with cyclic operations with the start at next period while
>> resuming.
>>
>> Signed-off-by: Nicolas Ferre<nicolas.ferre@atmel.com>
>> Signed-off-by: Uwe Kleine-König<u.kleine-koenig@pengutronix.de>
>> ---
>> drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
>> drivers/dma/at_hdmac_regs.h | 7 +++
>> 2 files changed, 94 insertions(+), 1 deletions(-)
>>
>> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
>> index fd87b96..7096adb 100644
>> --- a/drivers/dma/at_hdmac.c
>> +++ b/drivers/dma/at_hdmac.c
>> @@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev)
>> clk_disable(atdma->clk);
>> }
>>
>> +static int at_dma_prepare(struct device *dev)
>> +{
>> + struct platform_device *pdev = to_platform_device(dev);
>> + struct at_dma *atdma = platform_get_drvdata(pdev);
>> + struct dma_chan *chan, *_chan;
>> +
>> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
>> + device_node) {
>> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
>> + /* wait for transaction completion (except in cyclic case) */
>> + if (atc_chan_is_enabled(atchan)&&
>> + !test_bit(ATC_IS_CYCLIC,&atchan->status))
>> + return -EAGAIN;
> pls fix indent here
Fixed by replacement of test_bit() with a wrapper in a patch to come (as
you suggest hereafter).
>> + }
>> + return 0;
>> +}
>> +
>> +static void atc_suspend_cyclic(struct at_dma_chan *atchan)
>> +{
>> + struct dma_chan *chan =&atchan->chan_common;
>> +
>> + /* Channel should be paused by user
>> + * do it anyway even if it is not done already */
>> + if (!test_bit(ATC_IS_PAUSED,&atchan->status)) {
>> + dev_warn(chan2dev(chan),
>> + "cyclic channel not paused, should be done by channel user\n");
>> + atc_control(chan, DMA_PAUSE, 0);
>> + }
>> +
>> + /* now preserve additional data for cyclic operations */
>> + /* next descriptor address in the cyclic list */
>> + atchan->save_dscr = channel_readl(atchan, DSCR);
>> +
>> + vdbg_dump_regs(atchan);
>> +}
>> +
>> static int at_dma_suspend_noirq(struct device *dev)
>> {
>> struct platform_device *pdev = to_platform_device(dev);
>> struct at_dma *atdma = platform_get_drvdata(pdev);
>> + struct dma_chan *chan, *_chan;
>>
>> - at_dma_off(platform_get_drvdata(pdev));
>> + /* preserve data */
>> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
>> + device_node) {
>> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
>> +
>> + if (test_bit(ATC_IS_CYCLIC,&atchan->status))
>> + atc_suspend_cyclic(atchan);
>> + atchan->save_cfg = channel_readl(atchan, CFG);
>> + }
>> + atdma->save_imr = dma_readl(atdma, EBCIMR);
>> +
>> + /* disable DMA controller */
>> + at_dma_off(atdma);
>> clk_disable(atdma->clk);
>> return 0;
>> }
>>
>> +static void atc_resume_cyclic(struct at_dma_chan *atchan)
>> +{
>> + struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
>> +
>> + /* restore channel status for cyclic descriptors list:
>> + * next descriptor in the cyclic list at the time of suspend */
>> + channel_writel(atchan, SADDR, 0);
>> + channel_writel(atchan, DADDR, 0);
>> + channel_writel(atchan, CTRLA, 0);
>> + channel_writel(atchan, CTRLB, 0);
>> + channel_writel(atchan, DSCR, atchan->save_dscr);
>> + dma_writel(atdma, CHER, atchan->mask);
>> +
>> + /* channel pause status should be removed by channel user
>> + * We cannot take the initiative to do it here */
>> +
>> + vdbg_dump_regs(atchan);
>> +}
>> +
>> static int at_dma_resume_noirq(struct device *dev)
>> {
>> struct platform_device *pdev = to_platform_device(dev);
>> struct at_dma *atdma = platform_get_drvdata(pdev);
>> + struct dma_chan *chan, *_chan;
>>
>> + /* bring back DMA controller */
>> clk_enable(atdma->clk);
>> dma_writel(atdma, EN, AT_DMA_ENABLE);
>> +
>> + /* clear any pending interrupt */
>> + while (dma_readl(atdma, EBCISR))
>> + cpu_relax();
>> +
>> + /* restore saved data */
>> + dma_writel(atdma, EBCIER, atdma->save_imr);
>> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
>> + device_node) {
>> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
>> +
>> + channel_writel(atchan, CFG, atchan->save_cfg);
>> + if (test_bit(ATC_IS_CYCLIC,&atchan->status))
>> + atc_resume_cyclic(atchan);
> This testing on bits seems to be reused few times how about wrapping it
> up in a routine?
True: I have written a little patch for this and for the "PAUSE" state,
and I am sending it now:
dmaengine: at_hdmac: add wrappers for testing channel state
So can you:
1/ queue 1/3 and 2/3 of this patch series
2/ queue the following patch named
"dmaengine: at_hdmac: add wrappers for testing channel state"
on top of that
3/ drop patch 3/3 of this series: it certainly has to be reworked
along with the implementation of the slave-config infrastructure in the at_hdmac driver.
>> + }
>> return 0;
>> }
>>
>> static const struct dev_pm_ops at_dma_dev_pm_ops = {
>> + .prepare = at_dma_prepare,
>> .suspend_noirq = at_dma_suspend_noirq,
>> .resume_noirq = at_dma_resume_noirq,
>> };
>> diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
>> index 087dbf1..6f0c4a3 100644
>> --- a/drivers/dma/at_hdmac_regs.h
>> +++ b/drivers/dma/at_hdmac_regs.h
>> @@ -204,6 +204,9 @@ enum atc_status {
>> * @status: transmit status information from irq/prep* functions
>> * to tasklet (use atomic operations)
>> * @tasklet: bottom half to finish transaction work
>> + * @save_cfg: configuration register that is saved on suspend/resume cycle
>> + * @save_dscr: for cyclic operations, preserve next descriptor address in
>> + * the cyclic list on suspend/resume cycle
>> * @lock: serializes enqueue/dequeue operations to descriptors lists
>> * @completed_cookie: identifier for the most recently completed operation
>> * @active_list: list of descriptors dmaengine is being running on
>> @@ -218,6 +221,8 @@ struct at_dma_chan {
>> u8 mask;
>> unsigned long status;
>> struct tasklet_struct tasklet;
>> + u32 save_cfg;
>> + u32 save_dscr;
>>
>> spinlock_t lock;
>>
>> @@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
>> * @chan_common: common dmaengine dma_device object members
>> * @ch_regs: memory mapped register base
>> * @clk: dma controller clock
>> + * @save_imr: interrupt mask register that is saved on suspend/resume cycle
>> * @all_chan_mask: all channels availlable in a mask
>> * @dma_desc_pool: base of DMA descriptor region (DMA address)
>> * @chan: channels table to store at_dma_chan structures
>> @@ -256,6 +262,7 @@ struct at_dma {
>> struct dma_device dma_common;
>> void __iomem *regs;
>> struct clk *clk;
>> + u32 save_imr;
>>
>> u8 all_chan_mask;
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH] dmaengine: at_hdmac: add wrappers for testing channel state
2011-07-27 12:21 ` [PATCH V2 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
@ 2011-07-25 21:09 ` Nicolas Ferre
2011-08-01 14:18 ` [PATCH V2 3/3] " Nicolas Ferre
2011-07-27 12:21 ` [PATCH V2 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
1 sibling, 1 reply; 13+ messages in thread
From: Nicolas Ferre @ 2011-07-25 21:09 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel; +Cc: linux-kernel, Nicolas Ferre
Cyclic property and paused state are encoded as bits in the channel status
bitfield. Tests of those bits are wrapped in convenient helper functions.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
---
drivers/dma/at_hdmac.c | 19 +++++++++----------
drivers/dma/at_hdmac_regs.h | 17 +++++++++++++++++
2 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 99bd2c18..26a1196 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -301,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
- if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
@@ -478,7 +478,7 @@ static void atc_tasklet(unsigned long data)
spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
- else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ else if (atc_chan_is_cyclic(atchan))
atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
@@ -945,7 +945,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) {
- if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (!atc_chan_is_paused(atchan))
return 0;
spin_lock_irqsave(&atchan->lock, flags);
@@ -1035,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
else
dma_set_tx_state(txstate, last_complete, last_used, 0);
- if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED;
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1057,7 +1057,7 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
/* Not needed for cyclic transfers */
- if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_cyclic(atchan))
return;
spin_lock_irqsave(&atchan->lock, flags);
@@ -1395,8 +1395,7 @@ static int at_dma_prepare(struct device *dev)
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
/* wait for transaction completion (except in cyclic case) */
- if (atc_chan_is_enabled(atchan) &&
- !test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
return -EAGAIN;
}
return 0;
@@ -1408,7 +1407,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
/* Channel should be paused by user
* do it anyway even if it is not done already */
- if (!test_bit(ATC_IS_PAUSED, &atchan->status)) {
+ if (!atc_chan_is_paused(atchan)) {
dev_warn(chan2dev(chan),
"cyclic channel not paused, should be done by channel user\n");
atc_control(chan, DMA_PAUSE, 0);
@@ -1432,7 +1431,7 @@ static int at_dma_suspend_noirq(struct device *dev)
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_cyclic(atchan))
atc_suspend_cyclic(atchan);
atchan->save_cfg = channel_readl(atchan, CFG);
}
@@ -1484,7 +1483,7 @@ static int at_dma_resume_noirq(struct device *dev)
struct at_dma_chan *atchan = to_at_dma_chan(chan);
channel_writel(atchan, CFG, atchan->save_cfg);
- if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_cyclic(atchan))
atc_resume_cyclic(atchan);
}
return 0;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 6f0c4a3..aa4c9ae 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -362,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
return !!(dma_readl(atdma, CHSR) & atchan->mask);
}
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
/**
* set_desc_eol - set end-of-link to descriptor so it will end transfer
--
1.7.4.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH 3/3] dmaengine: at_hdmac: add slave config operation
2011-07-07 2:33 ` Vinod Koul
@ 2011-07-25 21:12 ` Nicolas Ferre
0 siblings, 0 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-07-25 21:12 UTC (permalink / raw)
To: Vinod Koul
Cc: dan.j.williams, linux-arm-kernel, linux-kernel, Uwe Kleine-König
On 07/07/2011 04:33 AM, Vinod Koul wrote:
> On Tue, 2011-06-28 at 13:17 +0200, Nicolas Ferre wrote:
>> Only change source/destination peripheral register access width.
> How about a little more explainable change log
>
>>
>> Signed-off-by: Nicolas Ferre<nicolas.ferre@atmel.com>
>> Signed-off-by: Uwe Kleine-König<u.kleine-koenig@pengutronix.de>
>> ---
>> drivers/dma/at_hdmac.c | 29 +++++++++++++++++++++++++++++
>> 1 files changed, 29 insertions(+), 0 deletions(-)
>>
>> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
>> index 7096adb..9cb8044 100644
>> --- a/drivers/dma/at_hdmac.c
>> +++ b/drivers/dma/at_hdmac.c
>> @@ -954,6 +954,35 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
>> clear_bit(ATC_IS_PAUSED,&atchan->status);
>>
>> spin_unlock_irqrestore(&atchan->lock, flags);
>> + } else if (cmd == DMA_SLAVE_CONFIG) {
>> + struct dma_slave_config *dmaengine_cfg = (void *)arg;
>> + struct at_dma_slave *atslave = chan->private;
> This API was created to remove chan->private, so this doesn't make sense
> at all. If your intent is to have a channels internal data, then you
> should consider embedding dma_chan into this structure (the way other
> dma drivers do) rather than using private field.
Yes, I need to rework this slave config part of the driver (and its
related calling drivers).
So please drop this 3/3 patch of the series.
Best regards,
--
Nicolas Ferre
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/3] dmaengine: at_hdmac: improve power management routines
2011-07-25 21:06 ` Nicolas Ferre
@ 2011-07-26 10:51 ` Vinod Koul
2011-07-27 12:21 ` [PATCH V2 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
0 siblings, 1 reply; 13+ messages in thread
From: Vinod Koul @ 2011-07-26 10:51 UTC (permalink / raw)
To: Nicolas Ferre
Cc: vinod.koul, dan.j.williams, linux-arm-kernel, linux-kernel,
Uwe Kleine-König
On Mon, 2011-07-25 at 23:06 +0200, Nicolas Ferre wrote:
> On 07/07/2011 04:20 AM, Vinod Koul wrote:
> > On Tue, 2011-06-28 at 13:17 +0200, Nicolas Ferre wrote:
> >> Save/restore dma controller state across a suspend-resume sequence.
> >> The prepare() function will wait for the non-cyclic channels to become idle.
> >> It also deals with cyclic operations with the start at next period while
> >> resuming.
> >>
> >> Signed-off-by: Nicolas Ferre<nicolas.ferre@atmel.com>
> >> Signed-off-by: Uwe Kleine-König<u.kleine-koenig@pengutronix.de>
> >> ---
> >> drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
> >> drivers/dma/at_hdmac_regs.h | 7 +++
> >> 2 files changed, 94 insertions(+), 1 deletions(-)
> >>
> >> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
> >> index fd87b96..7096adb 100644
> >> --- a/drivers/dma/at_hdmac.c
> >> +++ b/drivers/dma/at_hdmac.c
> >> @@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev)
> >> clk_disable(atdma->clk);
> >> }
> >>
> >> +static int at_dma_prepare(struct device *dev)
> >> +{
> >> + struct platform_device *pdev = to_platform_device(dev);
> >> + struct at_dma *atdma = platform_get_drvdata(pdev);
> >> + struct dma_chan *chan, *_chan;
> >> +
> >> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
> >> + device_node) {
> >> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> >> + /* wait for transaction completion (except in cyclic case) */
> >> + if (atc_chan_is_enabled(atchan)&&
> >> + !test_bit(ATC_IS_CYCLIC,&atchan->status))
> >> + return -EAGAIN;
> > pls fix indent here
>
> Fixed by replacement of test_bit() with a wrapper in a patch to come (as
> you suggest hereafter).
>
> >> + }
> >> + return 0;
> >> +}
> >> +
> >> +static void atc_suspend_cyclic(struct at_dma_chan *atchan)
> >> +{
> >> + struct dma_chan *chan =&atchan->chan_common;
> >> +
> >> + /* Channel should be paused by user
> >> + * do it anyway even if it is not done already */
> >> + if (!test_bit(ATC_IS_PAUSED,&atchan->status)) {
> >> + dev_warn(chan2dev(chan),
> >> + "cyclic channel not paused, should be done by channel user\n");
> >> + atc_control(chan, DMA_PAUSE, 0);
> >> + }
> >> +
> >> + /* now preserve additional data for cyclic operations */
> >> + /* next descriptor address in the cyclic list */
> >> + atchan->save_dscr = channel_readl(atchan, DSCR);
> >> +
> >> + vdbg_dump_regs(atchan);
> >> +}
> >> +
> >> static int at_dma_suspend_noirq(struct device *dev)
> >> {
> >> struct platform_device *pdev = to_platform_device(dev);
> >> struct at_dma *atdma = platform_get_drvdata(pdev);
> >> + struct dma_chan *chan, *_chan;
> >>
> >> - at_dma_off(platform_get_drvdata(pdev));
> >> + /* preserve data */
> >> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
> >> + device_node) {
> >> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> >> +
> >> + if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> >> + atc_suspend_cyclic(atchan);
> >> + atchan->save_cfg = channel_readl(atchan, CFG);
> >> + }
> >> + atdma->save_imr = dma_readl(atdma, EBCIMR);
> >> +
> >> + /* disable DMA controller */
> >> + at_dma_off(atdma);
> >> clk_disable(atdma->clk);
> >> return 0;
> >> }
> >>
> >> +static void atc_resume_cyclic(struct at_dma_chan *atchan)
> >> +{
> >> + struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
> >> +
> >> + /* restore channel status for cyclic descriptors list:
> >> + * next descriptor in the cyclic list at the time of suspend */
> >> + channel_writel(atchan, SADDR, 0);
> >> + channel_writel(atchan, DADDR, 0);
> >> + channel_writel(atchan, CTRLA, 0);
> >> + channel_writel(atchan, CTRLB, 0);
> >> + channel_writel(atchan, DSCR, atchan->save_dscr);
> >> + dma_writel(atdma, CHER, atchan->mask);
> >> +
> >> + /* channel pause status should be removed by channel user
> >> + * We cannot take the initiative to do it here */
> >> +
> >> + vdbg_dump_regs(atchan);
> >> +}
> >> +
> >> static int at_dma_resume_noirq(struct device *dev)
> >> {
> >> struct platform_device *pdev = to_platform_device(dev);
> >> struct at_dma *atdma = platform_get_drvdata(pdev);
> >> + struct dma_chan *chan, *_chan;
> >>
> >> + /* bring back DMA controller */
> >> clk_enable(atdma->clk);
> >> dma_writel(atdma, EN, AT_DMA_ENABLE);
> >> +
> >> + /* clear any pending interrupt */
> >> + while (dma_readl(atdma, EBCISR))
> >> + cpu_relax();
> >> +
> >> + /* restore saved data */
> >> + dma_writel(atdma, EBCIER, atdma->save_imr);
> >> + list_for_each_entry_safe(chan, _chan,&atdma->dma_common.channels,
> >> + device_node) {
> >> + struct at_dma_chan *atchan = to_at_dma_chan(chan);
> >> +
> >> + channel_writel(atchan, CFG, atchan->save_cfg);
> >> + if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> >> + atc_resume_cyclic(atchan);
> > This testing on bits seems to be reused few times how about wrapping it
> > up in a routine?
>
> True: I write a little patch for this and the "PAUSE" state. I send a
> patch for this now:
> dmaengine: at_hdmac: add wrappers for testing channel state
> So can you:
> 1/ queue 1/3 and 2/3 of this patch series
> 2/ queue the following patch named
> "dmaengine: at_hdmac: add wrappers for testing channel state"
> on top of that
>
> 3/ drop the patch 3/3 of this series: it certainly has to be reworked
> along with the slave config infrastructure implementation in the at_hdmac driver.
The indent needs to be fixed before I can apply this one. Sorry, but an
error introduced in one patch cannot be fixed in the next; it is not
supposed to work that way.
--
~Vinod Koul
Intel Corp.
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH V2 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants
2011-07-26 10:51 ` Vinod Koul
@ 2011-07-27 12:21 ` Nicolas Ferre
2011-07-25 21:09 ` [PATCH] dmaengine: at_hdmac: add wrappers for testing channel state Nicolas Ferre
2011-07-27 12:21 ` [PATCH V2 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
0 siblings, 2 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-07-27 12:21 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel
Cc: linux-kernel, Nicolas Ferre, Uwe Kleine-König
dmaengine routines can be called from interrupt context and with
interrupts disabled. Whereas spin_unlock_bh can't be called from
such contexts. So this patch converts all spin_lock* routines
to irqsave variants.
Also, spin_lock() used in tasklet is converted to irqsave variants,
as tasklet can be interrupted, and dma requests from such interruptions
may also call spin_lock.
Idea from dw_dmac patch by Viresh Kumar.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
---
V2: no change but resent to ease the patch series understanding
drivers/dma/at_hdmac.c | 52 +++++++++++++++++++++++++++--------------------
1 files changed, 30 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f8..6ee8f1c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
struct at_desc *ret = NULL;
+ unsigned long flags;
unsigned int i = 0;
LIST_HEAD(tmp_list);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i);
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
if (desc) {
struct at_desc *child;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
}
@@ -471,8 +473,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+ unsigned long flags;
- spin_lock(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
@@ -480,7 +483,7 @@ static void atc_tasklet(unsigned long data)
else
atc_advance_work(atchan);
- spin_unlock(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return cookie;
}
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
+ unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) {
if (!test_bit(ATC_IS_PAUSED, &atchan->status))
return 0;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc;
/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
return -ENXIO;
}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
+ unsigned long flags;
enum dma_status ret;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used,
@@ -1046,6 +1052,7 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
@@ -1053,11 +1060,11 @@ static void atc_issue_pending(struct dma_chan *chan)
if (test_bit(ATC_IS_CYCLIC, &atchan->status))
return;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
+ unsigned long flags;
int i;
u32 cfg;
LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
--
1.7.4.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH V2 2/3] dmaengine: at_hdmac: improve power management routines
2011-07-27 12:21 ` [PATCH V2 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
2011-07-25 21:09 ` [PATCH] dmaengine: at_hdmac: add wrappers for testing channel state Nicolas Ferre
@ 2011-07-27 12:21 ` Nicolas Ferre
1 sibling, 0 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-07-27 12:21 UTC (permalink / raw)
To: vinod.koul, dan.j.williams, linux-arm-kernel
Cc: linux-kernel, Nicolas Ferre, Uwe Kleine-König
Save/restore dma controller state across a suspend-resume sequence.
The prepare() function will wait for the non-cyclic channels to become idle.
It also deals with cyclic operations with the start at next period while
resuming.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
---
V2: fix indentation
drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
drivers/dma/at_hdmac_regs.h | 7 +++
2 files changed, 94 insertions(+), 1 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6ee8f1c..99bd2c18 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk);
}
+static int at_dma_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ /* wait for transaction completion (except in cyclic case) */
+ if (atc_chan_is_enabled(atchan) &&
+ !test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+ struct dma_chan *chan = &atchan->chan_common;
+
+ /* Channel should be paused by user
+ * do it anyway even if it is not done already */
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status)) {
+ dev_warn(chan2dev(chan),
+ "cyclic channel not paused, should be done by channel user\n");
+ atc_control(chan, DMA_PAUSE, 0);
+ }
+
+ /* now preserve additional data for cyclic operations */
+ /* next descriptor address in the cyclic list */
+ atchan->save_dscr = channel_readl(atchan, DSCR);
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
- at_dma_off(platform_get_drvdata(pdev));
+ /* preserve data */
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_suspend_cyclic(atchan);
+ atchan->save_cfg = channel_readl(atchan, CFG);
+ }
+ atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+ /* disable DMA controller */
+ at_dma_off(atdma);
clk_disable(atdma->clk);
return 0;
}
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+ /* restore channel status for cyclic descriptors list:
+ * next descriptor in the cyclic list at the time of suspend */
+ channel_writel(atchan, SADDR, 0);
+ channel_writel(atchan, DADDR, 0);
+ channel_writel(atchan, CTRLA, 0);
+ channel_writel(atchan, CTRLB, 0);
+ channel_writel(atchan, DSCR, atchan->save_dscr);
+ dma_writel(atdma, CHER, atchan->mask);
+
+ /* channel pause status should be removed by channel user
+ * We cannot take the initiative to do it here */
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+ /* bring back DMA controller */
clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+ /* clear any pending interrupt */
+ while (dma_readl(atdma, EBCISR))
+ cpu_relax();
+
+ /* restore saved data */
+ dma_writel(atdma, EBCIER, atdma->save_imr);
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ channel_writel(atchan, CFG, atchan->save_cfg);
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_resume_cyclic(atchan);
+ }
return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
+ .prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 087dbf1..6f0c4a3 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ * the cyclic list on suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask;
unsigned long status;
struct tasklet_struct tasklet;
+ u32 save_cfg;
+ u32 save_dscr;
spinlock_t lock;
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base
* @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels availlable in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common;
void __iomem *regs;
struct clk *clk;
+ u32 save_imr;
u8 all_chan_mask;
--
1.7.4.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH V2 3/3] dmaengine: at_hdmac: add wrappers for testing channel state
2011-07-25 21:09 ` [PATCH] dmaengine: at_hdmac: add wrappers for testing channel state Nicolas Ferre
@ 2011-08-01 14:18 ` Nicolas Ferre
0 siblings, 0 replies; 13+ messages in thread
From: Nicolas Ferre @ 2011-08-01 14:18 UTC (permalink / raw)
To: Nicolas Ferre; +Cc: vinod.koul, dan.j.williams, linux-arm-kernel, linux-kernel
On 07/27/2011 01:21 PM, Nicolas Ferre wrote:
> Cyclic property and paused state are encoded as bits in the channel status
> bitfield. Tests of those bits are wrapped in convenient helper functions.
>
> Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
> ---
> V2: match V2 2/3 patch indentation fixing
Vinod, any news on this V2 series?
Thanks, bye,
> drivers/dma/at_hdmac.c | 19 +++++++++----------
> drivers/dma/at_hdmac_regs.h | 17 +++++++++++++++++
> 2 files changed, 26 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
> index 99bd2c18..26a1196 100644
> --- a/drivers/dma/at_hdmac.c
> +++ b/drivers/dma/at_hdmac.c
> @@ -301,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
>
> /* for cyclic transfers,
> * no need to replay callback function while stopping */
> - if (!test_bit(ATC_IS_CYCLIC,&atchan->status)) {
> + if (!atc_chan_is_cyclic(atchan)) {
> dma_async_tx_callback callback = txd->callback;
> void *param = txd->callback_param;
>
> @@ -478,7 +478,7 @@ static void atc_tasklet(unsigned long data)
> spin_lock_irqsave(&atchan->lock, flags);
> if (test_and_clear_bit(ATC_IS_ERROR,&atchan->status))
> atc_handle_error(atchan);
> - else if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> + else if (atc_chan_is_cyclic(atchan))
> atc_handle_cyclic(atchan);
> else
> atc_advance_work(atchan);
> @@ -945,7 +945,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
>
> spin_unlock_irqrestore(&atchan->lock, flags);
> } else if (cmd == DMA_RESUME) {
> - if (!test_bit(ATC_IS_PAUSED,&atchan->status))
> + if (!atc_chan_is_paused(atchan))
> return 0;
>
> spin_lock_irqsave(&atchan->lock, flags);
> @@ -1035,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
> else
> dma_set_tx_state(txstate, last_complete, last_used, 0);
>
> - if (test_bit(ATC_IS_PAUSED,&atchan->status))
> + if (atc_chan_is_paused(atchan))
> ret = DMA_PAUSED;
>
> dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
> @@ -1057,7 +1057,7 @@ static void atc_issue_pending(struct dma_chan *chan)
> dev_vdbg(chan2dev(chan), "issue_pending\n");
>
> /* Not needed for cyclic transfers */
> - if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> + if (atc_chan_is_cyclic(atchan))
> return;
>
> spin_lock_irqsave(&atchan->lock, flags);
> @@ -1395,8 +1395,7 @@ static int at_dma_prepare(struct device *dev)
> device_node) {
> struct at_dma_chan *atchan = to_at_dma_chan(chan);
> /* wait for transaction completion (except in cyclic case) */
> - if (atc_chan_is_enabled(atchan)&&
> - !test_bit(ATC_IS_CYCLIC,&atchan->status))
> + if (atc_chan_is_enabled(atchan)&& !atc_chan_is_cyclic(atchan))
> return -EAGAIN;
> }
> return 0;
> @@ -1408,7 +1407,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
>
> /* Channel should be paused by user
> * do it anyway even if it is not done already */
> - if (!test_bit(ATC_IS_PAUSED,&atchan->status)) {
> + if (!atc_chan_is_paused(atchan)) {
> dev_warn(chan2dev(chan),
> "cyclic channel not paused, should be done by channel user\n");
> atc_control(chan, DMA_PAUSE, 0);
> @@ -1432,7 +1431,7 @@ static int at_dma_suspend_noirq(struct device *dev)
> device_node) {
> struct at_dma_chan *atchan = to_at_dma_chan(chan);
>
> - if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> + if (atc_chan_is_cyclic(atchan))
> atc_suspend_cyclic(atchan);
> atchan->save_cfg = channel_readl(atchan, CFG);
> }
> @@ -1484,7 +1483,7 @@ static int at_dma_resume_noirq(struct device *dev)
> struct at_dma_chan *atchan = to_at_dma_chan(chan);
>
> channel_writel(atchan, CFG, atchan->save_cfg);
> - if (test_bit(ATC_IS_CYCLIC,&atchan->status))
> + if (atc_chan_is_cyclic(atchan))
> atc_resume_cyclic(atchan);
> }
> return 0;
> diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
> index 6f0c4a3..aa4c9ae 100644
> --- a/drivers/dma/at_hdmac_regs.h
> +++ b/drivers/dma/at_hdmac_regs.h
> @@ -362,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
> return !!(dma_readl(atdma, CHSR)& atchan->mask);
> }
>
> +/**
> + * atc_chan_is_paused - test channel pause/resume status
> + * @atchan: channel we want to test status
> + */
> +static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
> +{
> + return test_bit(ATC_IS_PAUSED,&atchan->status);
> +}
> +
> +/**
> + * atc_chan_is_cyclic - test if given channel has cyclic property set
> + * @atchan: channel we want to test status
> + */
> +static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
> +{
> + return test_bit(ATC_IS_CYCLIC,&atchan->status);
> +}
>
> /**
> * set_desc_eol - set end-of-link to descriptor so it will end transfer
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2011-08-01 14:19 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-06-28 11:17 [PATCH 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
2011-06-28 10:15 ` Nicolas Ferre
2011-06-28 11:17 ` [PATCH 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
2011-07-07 2:20 ` Vinod Koul
2011-07-25 21:06 ` Nicolas Ferre
2011-07-26 10:51 ` Vinod Koul
2011-07-27 12:21 ` [PATCH V2 1/3] dmaengine: at_hdmac: replace spin_lock* with irqsave variants Nicolas Ferre
2011-07-25 21:09 ` [PATCH] dmaengine: at_hdmac: add wrappers for testing channel state Nicolas Ferre
2011-08-01 14:18 ` [PATCH V2 3/3] " Nicolas Ferre
2011-07-27 12:21 ` [PATCH V2 2/3] dmaengine: at_hdmac: improve power management routines Nicolas Ferre
2011-06-28 11:17 ` [PATCH 3/3] dmaengine: at_hdmac: add slave config operation Nicolas Ferre
2011-07-07 2:33 ` Vinod Koul
2011-07-25 21:12 ` Nicolas Ferre
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).