linux-mips.vger.kernel.org archive mirror
* [PATCH] MIPS: lantiq: lock DMA register accesses for SMP
@ 2016-12-30 11:18 Hauke Mehrtens
  2017-01-24 18:16 ` Ralf Baechle
  0 siblings, 1 reply; 2+ messages in thread
From: Hauke Mehrtens @ 2016-12-30 11:18 UTC (permalink / raw)
  To: ralf; +Cc: linux-mips, john, Hauke Mehrtens

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration in other registers. This has to be done as an atomic
operation. Previously only the local interrupts were deactivated, which
works for single-CPU systems. If the system supports SMP, better
locking is needed, so use spinlocks instead.
On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there we could use one area for
each CPU and would not have to synchronize between the CPUs any more.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)
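
For illustration only (not part of the applied patch), a minimal sketch of
the select-then-configure pattern the new lock serializes;
ltq_dma_select_channel_cfg() is a hypothetical helper name used here just
to show the idea:

/*
 * Illustration only: the pattern being serialized is "write the channel
 * number to LTQ_DMA_CS, then touch the per-channel registers".  Holding
 * ltq_dma_lock across both steps keeps another CPU from changing the
 * channel selection in between.
 */
static void ltq_dma_select_channel_cfg(struct ltq_dma_channel *ch, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	ltq_dma_w32(ch->nr, LTQ_DMA_CS);		/* select channel */
	ltq_dma_w32_mask(0, set, LTQ_DMA_CCTRL);	/* update its config */
	spin_unlock_irqrestore(&ltq_dma_lock, flags);
}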

diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index cef8117..a4ec07b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
 						ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 				&ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
 
-- 
2.1.4


* Re: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP
  2016-12-30 11:18 [PATCH] MIPS: lantiq: lock DMA register accesses for SMP Hauke Mehrtens
@ 2017-01-24 18:16 ` Ralf Baechle
  0 siblings, 0 replies; 2+ messages in thread
From: Ralf Baechle @ 2017-01-24 18:16 UTC (permalink / raw)
  To: Hauke Mehrtens; +Cc: linux-mips, john

On Fri, Dec 30, 2016 at 12:18:27PM +0100, Hauke Mehrtens wrote:

> The DMA controller channel and port configuration is changed by
> selecting the port or channel in one register and then updating the
> configuration in other registers. This has to be done as an atomic
> operation. Previously only the local interrupts were deactivated, which
> works for single-CPU systems. If the system supports SMP, better
> locking is needed, so use spinlocks instead.
> On more recent SoCs (at least xrx200 and later) there are two memory
> regions for changing the configuration; there we could use one area for
> each CPU and would not have to synchronize between the CPUs any more.

You should always use proper locking, not interrupt fiddlery, for
serialization.  On UP kernels the spinlocks expand to just the
local_irq_* calls, so there is no overhead.
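
A minimal sketch of what that means in practice, assuming a standard
non-debug kernel configuration (the exact expansion depends on the
config options):

	unsigned long flags;

	spin_lock_irqsave(&ltq_dma_lock, flags);
	/* ... select channel in LTQ_DMA_CS and program its registers ... */
	spin_unlock_irqrestore(&ltq_dma_lock, flags);

	/*
	 * CONFIG_SMP: local IRQs are disabled and the lock is taken, so
	 * other CPUs are kept out of the CS/register sequence as well.
	 *
	 * !CONFIG_SMP (and no lock debugging): the spinlock body compiles
	 * away and the two calls reduce to roughly
	 *	local_irq_save(flags);
	 *	...
	 *	local_irq_restore(flags);
	 * i.e. the previous behaviour, with no added overhead.
	 */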

Applied,

  Ralf

