* [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
@ 2012-07-11 9:02 ` Qiang Liu
0 siblings, 0 replies; 8+ messages in thread
From: Qiang Liu @ 2012-07-11 9:02 UTC (permalink / raw)
To: linux-crypto, linuxppc-dev
Cc: kim.phillips, herbert, davem, Qiang Liu, Dan Williams,
Vinod Koul, Li Yang
Use spin_lock_bh instead of spin_lock_irqsave to improve performance.
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Qiang Liu <qiang.liu@freescale.com>
---
drivers/dma/fsldma.c | 30 ++++++++++++------------------
1 files changed, 12 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0ba3e40..f2822a8 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -405,10 +405,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
- unsigned long flags;
dma_cookie_t cookie;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/*
* assign cookies to all of the software descriptors
@@ -421,7 +420,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return cookie;
}
@@ -530,13 +529,12 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
chan_dbg(chan, "free all channel resources\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -755,7 +753,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
{
struct dma_slave_config *config;
struct fsldma_chan *chan;
- unsigned long flags;
int size;
if (!dchan)
@@ -765,7 +762,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* Halt the DMA engine */
dma_halt(chan);
@@ -775,7 +772,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
fsldma_free_desc_list(chan, &chan->ld_running);
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return 0;
case DMA_SLAVE_CONFIG:
@@ -935,11 +932,10 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
}
/**
@@ -952,11 +948,10 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
enum dma_status ret;
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return ret;
}
@@ -1037,12 +1032,11 @@ static void dma_do_tasklet(unsigned long data)
struct fsldma_chan *chan = (struct fsldma_chan *)data;
struct fsl_desc_sw *desc, *_desc, *prev = NULL;
LIST_HEAD(ld_cleanup);
- unsigned long flags;
dma_addr_t curr_phys = get_cdar(chan);
chan_dbg(chan, "tasklet entry\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* find the descriptor which is already completed */
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
@@ -1079,7 +1073,7 @@ static void dma_do_tasklet(unsigned long data)
* ahead and free the descriptors below.
*/
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
/* Run the callback for each descriptor, in order */
list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
--
1.7.5.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
@ 2012-07-11 9:02 ` Qiang Liu
0 siblings, 0 replies; 8+ messages in thread
From: Qiang Liu @ 2012-07-11 9:02 UTC (permalink / raw)
To: linux-crypto, linuxppc-dev
Cc: Vinod Koul, Qiang Liu, herbert, Dan Williams, davem
Use spin_lock_bh instead of spin_lock_irqsave to improve performance.
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Qiang Liu <qiang.liu@freescale.com>
---
drivers/dma/fsldma.c | 30 ++++++++++++------------------
1 files changed, 12 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0ba3e40..f2822a8 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -405,10 +405,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
- unsigned long flags;
dma_cookie_t cookie;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/*
* assign cookies to all of the software descriptors
@@ -421,7 +420,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return cookie;
}
@@ -530,13 +529,12 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
chan_dbg(chan, "free all channel resources\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -755,7 +753,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
{
struct dma_slave_config *config;
struct fsldma_chan *chan;
- unsigned long flags;
int size;
if (!dchan)
@@ -765,7 +762,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* Halt the DMA engine */
dma_halt(chan);
@@ -775,7 +772,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
fsldma_free_desc_list(chan, &chan->ld_running);
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return 0;
case DMA_SLAVE_CONFIG:
@@ -935,11 +932,10 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
}
/**
@@ -952,11 +948,10 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
enum dma_status ret;
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return ret;
}
@@ -1037,12 +1032,11 @@ static void dma_do_tasklet(unsigned long data)
struct fsldma_chan *chan = (struct fsldma_chan *)data;
struct fsl_desc_sw *desc, *_desc, *prev = NULL;
LIST_HEAD(ld_cleanup);
- unsigned long flags;
dma_addr_t curr_phys = get_cdar(chan);
chan_dbg(chan, "tasklet entry\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* find the descriptor which is already completed */
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
@@ -1079,7 +1073,7 @@ static void dma_do_tasklet(unsigned long data)
* ahead and free the descriptors below.
*/
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
/* Run the callback for each descriptor, in order */
list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
--
1.7.5.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
2012-07-11 9:02 ` Qiang Liu
@ 2012-07-11 15:01 ` Timur Tabi
-1 siblings, 0 replies; 8+ messages in thread
From: Timur Tabi @ 2012-07-11 15:01 UTC (permalink / raw)
To: Qiang Liu
Cc: linux-crypto, linuxppc-dev, Vinod Koul, herbert, Dan Williams,
Li Yang, davem
Qiang Liu wrote:
> Use spin_lock_bh to instead of spin_lock_irqsave for improving performance.
Please provide some evidence that performance has improved, as well as an
explanation why it's okay to use spin_lock_bh, and why it's faster.
--
Timur Tabi
Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
@ 2012-07-11 15:01 ` Timur Tabi
0 siblings, 0 replies; 8+ messages in thread
From: Timur Tabi @ 2012-07-11 15:01 UTC (permalink / raw)
To: Qiang Liu
Cc: Vinod Koul, herbert, linux-crypto, Dan Williams, linuxppc-dev, davem
Qiang Liu wrote:
> Use spin_lock_bh to instead of spin_lock_irqsave for improving performance.
Please provide some evidence that performance has improved, as well as an
explanation why it's okay to use spin_lock_bh, and why it's faster.
--
Timur Tabi
Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
2012-07-11 15:01 ` Timur Tabi
@ 2012-07-12 9:07 ` Liu Qiang-B32616
-1 siblings, 0 replies; 8+ messages in thread
From: Liu Qiang-B32616 @ 2012-07-12 9:07 UTC (permalink / raw)
To: Tabi Timur-B04825
Cc: linux-crypto, linuxppc-dev, Vinod Koul, herbert, Dan Williams,
Li Yang-R58472, davem
> -----Original Message-----
> From: Tabi Timur-B04825
> Sent: Wednesday, July 11, 2012 11:01 PM
> To: Liu Qiang-B32616
> Cc: linux-crypto@vger.kernel.org; linuxppc-dev@lists.ozlabs.org; Vinod
> Koul; herbert@gondor.hengli.com.au; Dan Williams; Li Yang-R58472;
> davem@davemloft.net
> Subject: Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh
> to instead of spin_lock_irqsave
>
> Qiang Liu wrote:
> > Use spin_lock_bh to instead of spin_lock_irqsave for improving
> performance.
>
> Please provide some evidence that performance has improved, as well as an
> explanation why it's okay to use spin_lock_bh, and why it's faster.
I compared my test results before and after this patch; write performance
improved by 15%. I will send the latest patches soon because of Ira's concern.
I will give a complete description of the improvement from spin_lock_bh().
About your question, spin_lock_bh is used in the case of bottom/half as its
name, there is no need to protect a running/pending list with spin_lock_irqsave.
Thanks.
> --
> Timur Tabi
> Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
* RE: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
@ 2012-07-12 9:07 ` Liu Qiang-B32616
0 siblings, 0 replies; 8+ messages in thread
From: Liu Qiang-B32616 @ 2012-07-12 9:07 UTC (permalink / raw)
To: Tabi Timur-B04825
Cc: Li Yang-R58472, Vinod Koul, herbert, linux-crypto, Dan Williams,
linuxppc-dev, davem
> -----Original Message-----
> From: Tabi Timur-B04825
> Sent: Wednesday, July 11, 2012 11:01 PM
> To: Liu Qiang-B32616
> Cc: linux-crypto@vger.kernel.org; linuxppc-dev@lists.ozlabs.org; Vinod
> Koul; herbert@gondor.hengli.com.au; Dan Williams; Li Yang-R58472;
> davem@davemloft.net
> Subject: Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh
> to instead of spin_lock_irqsave
>
> Qiang Liu wrote:
> > Use spin_lock_bh to instead of spin_lock_irqsave for improving
> performance.
>
> Please provide some evidence that performance has improved, as well as an
> explanation why it's okay to use spin_lock_bh, and why it's faster.
I compared my test results before and after this patch; write performance
improved by 15%. I will send the latest patches soon because of Ira's concern.
I will give a complete description of the improvement from spin_lock_bh().
About your question, spin_lock_bh is used in the case of bottom/half as its
name, there is no need to protect a running/pending list with spin_lock_irqsave.
Thanks.
> --
> Timur Tabi
> Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
2012-07-12 9:07 ` Liu Qiang-B32616
@ 2012-07-12 18:23 ` Timur Tabi
-1 siblings, 0 replies; 8+ messages in thread
From: Timur Tabi @ 2012-07-12 18:23 UTC (permalink / raw)
To: Liu Qiang-B32616
Cc: linux-crypto, linuxppc-dev, Vinod Koul, herbert, Dan Williams,
Li Yang-R58472, davem
Liu Qiang-B32616 wrote:
> I compared my test result before and after this patch, write performance can
> improved by 15%. I will send the latest patches sooner because of Ira's concern.
> I will give a complete description about the improvement of spin_lock_bh().
>
> About your question, spin_lock_bh is used in the case of bottom/half as its
> name, there is no need to protect a running/pending list with spin_lock_irqsave.
Please respin the patch and include this information in the patch description.
--
Timur Tabi
Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [linuxppc-release] [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave
@ 2012-07-12 18:23 ` Timur Tabi
0 siblings, 0 replies; 8+ messages in thread
From: Timur Tabi @ 2012-07-12 18:23 UTC (permalink / raw)
To: Liu Qiang-B32616
Cc: Li Yang-R58472, Vinod Koul, herbert, linux-crypto, Dan Williams,
linuxppc-dev, davem
Liu Qiang-B32616 wrote:
> I compared my test result before and after this patch, write performance can
> improved by 15%. I will send the latest patches sooner because of Ira's concern.
> I will give a complete description about the improvement of spin_lock_bh().
>
> About your question, spin_lock_bh is used in the case of bottom/half as its
> name, there is no need to protect a running/pending list with spin_lock_irqsave.
Please respin the patch and include this information in the patch description.
--
Timur Tabi
Linux kernel developer at Freescale
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2012-07-12 18:23 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-07-11 9:02 [PATCH v2 4/4] fsl-dma: use spin_lock_bh to instead of spin_lock_irqsave Qiang Liu
2012-07-11 9:02 ` Qiang Liu
2012-07-11 15:01 ` [linuxppc-release] " Timur Tabi
2012-07-11 15:01 ` Timur Tabi
2012-07-12 9:07 ` Liu Qiang-B32616
2012-07-12 9:07 ` Liu Qiang-B32616
2012-07-12 18:23 ` Timur Tabi
2012-07-12 18:23 ` Timur Tabi
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.