All of lore.kernel.org
 help / color / mirror / Atom feed
* [v4,1/6] dmaengine: fsldma: Replace DMA_IN/OUT by FSL_DMA_IN/OUT
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

This patch implements standard macro helpers that are
used by the NXP DMA drivers.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

change in v3:
	- no change

change in v2:
	- Replace 'ioread##width##be(addr) : ioread##width(addr)'
	  by 'fsl_ioread##width##be(addr) : fsl_ioread##width(addr)'
	- Fix macro FSL_DMA_IN/OUT build issues in powerpc
					   
 drivers/dma/fsldma.c |   16 +++++++-------
 drivers/dma/fsldma.h |   57 +++++++++++++++++++++++++++++++++----------------
 2 files changed, 46 insertions(+), 27 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 3eaece8..75479d6 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -53,42 +53,42 @@
 
 static void set_sr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->sr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
 }
 
 static u32 get_sr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->sr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->sr, 32);
 }
 
 static void set_mr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->mr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
 }
 
 static u32 get_mr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->mr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->mr, 32);
 }
 
 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
 {
-	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+	FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
 static dma_addr_t get_cdar(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+	return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
 static void set_bcr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->bcr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
 }
 
 static u32 get_bcr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->bcr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
 }
 
 /*
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4787d48..4c33a53 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -196,39 +196,58 @@ struct fsldma_chan {
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
 
+#ifdef	CONFIG_PPC
+#define fsl_ioread32(p)		in_le32(p)
+#define fsl_ioread32be(p)	in_be32(p)
+#define fsl_iowrite32(v, p)	out_le32(p, v)
+#define fsl_iowrite32be(v, p)	out_be32(p, v)
+
 #ifndef __powerpc64__
-static u64 in_be64(const u64 __iomem *addr)
+static u64 fsl_ioread64(const u64 __iomem *addr)
 {
-	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
-		(in_be32((u32 __iomem *)addr + 1));
+	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
+		(in_le32((u32 __iomem *)addr));
 }
 
-static void out_be64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64(u64 val, u64 __iomem *addr)
 {
-	out_be32((u32 __iomem *)addr, val >> 32);
-	out_be32((u32 __iomem *)addr + 1, (u32)val);
+	out_le32((u32 __iomem *)addr + 1, val >> 32);
+	out_le32((u32 __iomem *)addr, (u32)val);
 }
 
-/* There is no asm instructions for 64 bits reverse loads and stores */
-static u64 in_le64(const u64 __iomem *addr)
+static u64 fsl_ioread64be(const u64 __iomem *addr)
 {
-	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
-		(in_le32((u32 __iomem *)addr));
+	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
+		(in_be32((u32 __iomem *)addr + 1));
 }
 
-static void out_le64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
 {
-	out_le32((u32 __iomem *)addr + 1, val >> 32);
-	out_le32((u32 __iomem *)addr, (u32)val);
+	out_be32((u32 __iomem *)addr, val >> 32);
+	out_be32((u32 __iomem *)addr + 1, (u32)val);
 }
 #endif
+#endif
 
-#define DMA_IN(fsl_chan, addr, width)					\
-		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
-			in_be##width(addr) : in_le##width(addr))
-#define DMA_OUT(fsl_chan, addr, val, width)				\
-		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
-			out_be##width(addr, val) : out_le##width(addr, val))
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define fsl_ioread32(p)		ioread32(p)
+#define fsl_ioread32be(p)	ioread32be(p)
+#define fsl_iowrite32(v, p)	iowrite32(v, p)
+#define fsl_iowrite32be(v, p)	iowrite32be(v, p)
+#define fsl_ioread64(p)		ioread64(p)
+#define fsl_ioread64be(p)	ioread64be(p)
+#define fsl_iowrite64(v, p)	iowrite64(v, p)
+#define fsl_iowrite64be(v, p)	iowrite64be(v, p)
+#endif
+
+#define FSL_DMA_IN(fsl_dma, addr, width)				\
+		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?		\
+			fsl_ioread##width##be(addr) : fsl_ioread##width(addr))
+
+#define FSL_DMA_OUT(fsl_dma, addr, val, width)				\
+		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?		\
+			fsl_iowrite##width##be(val, addr) : fsl_iowrite	\
+		##width(val, addr))
 
 #define DMA_TO_CPU(fsl_chan, d, width)					\
 		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 1/6] dmaengine: fsldma: Replace DMA_IN/OUT by FSL_DMA_IN/OUT
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

This patch implements standard macro helpers that are
used by the NXP DMA drivers.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

change in v3:
	- no change

change in v2:
	- Replace 'ioread##width##be(addr) : ioread##width(addr)'
	  by 'fsl_ioread##width##be(addr) : fsl_ioread##width(addr)'
	- Fix macro FSL_DMA_IN/OUT build issues in powerpc
					   
 drivers/dma/fsldma.c |   16 +++++++-------
 drivers/dma/fsldma.h |   57 +++++++++++++++++++++++++++++++++----------------
 2 files changed, 46 insertions(+), 27 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 3eaece8..75479d6 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -53,42 +53,42 @@
 
 static void set_sr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->sr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
 }
 
 static u32 get_sr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->sr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->sr, 32);
 }
 
 static void set_mr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->mr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
 }
 
 static u32 get_mr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->mr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->mr, 32);
 }
 
 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
 {
-	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+	FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
 static dma_addr_t get_cdar(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+	return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
 static void set_bcr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(chan, &chan->regs->bcr, val, 32);
+	FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
 }
 
 static u32 get_bcr(struct fsldma_chan *chan)
 {
-	return DMA_IN(chan, &chan->regs->bcr, 32);
+	return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
 }
 
 /*
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 4787d48..4c33a53 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -196,39 +196,58 @@ struct fsldma_chan {
 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
 
+#ifdef	CONFIG_PPC
+#define fsl_ioread32(p)		in_le32(p)
+#define fsl_ioread32be(p)	in_be32(p)
+#define fsl_iowrite32(v, p)	out_le32(p, v)
+#define fsl_iowrite32be(v, p)	out_be32(p, v)
+
 #ifndef __powerpc64__
-static u64 in_be64(const u64 __iomem *addr)
+static u64 fsl_ioread64(const u64 __iomem *addr)
 {
-	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
-		(in_be32((u32 __iomem *)addr + 1));
+	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
+		(in_le32((u32 __iomem *)addr));
 }
 
-static void out_be64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64(u64 val, u64 __iomem *addr)
 {
-	out_be32((u32 __iomem *)addr, val >> 32);
-	out_be32((u32 __iomem *)addr + 1, (u32)val);
+	out_le32((u32 __iomem *)addr + 1, val >> 32);
+	out_le32((u32 __iomem *)addr, (u32)val);
 }
 
-/* There is no asm instructions for 64 bits reverse loads and stores */
-static u64 in_le64(const u64 __iomem *addr)
+static u64 fsl_ioread64be(const u64 __iomem *addr)
 {
-	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
-		(in_le32((u32 __iomem *)addr));
+	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
+		(in_be32((u32 __iomem *)addr + 1));
 }
 
-static void out_le64(u64 __iomem *addr, u64 val)
+static void fsl_iowrite64be(u64 val, u64 __iomem *addr)
 {
-	out_le32((u32 __iomem *)addr + 1, val >> 32);
-	out_le32((u32 __iomem *)addr, (u32)val);
+	out_be32((u32 __iomem *)addr, val >> 32);
+	out_be32((u32 __iomem *)addr + 1, (u32)val);
 }
 #endif
+#endif
 
-#define DMA_IN(fsl_chan, addr, width)					\
-		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
-			in_be##width(addr) : in_le##width(addr))
-#define DMA_OUT(fsl_chan, addr, val, width)				\
-		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
-			out_be##width(addr, val) : out_le##width(addr, val))
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define fsl_ioread32(p)		ioread32(p)
+#define fsl_ioread32be(p)	ioread32be(p)
+#define fsl_iowrite32(v, p)	iowrite32(v, p)
+#define fsl_iowrite32be(v, p)	iowrite32be(v, p)
+#define fsl_ioread64(p)		ioread64(p)
+#define fsl_ioread64be(p)	ioread64be(p)
+#define fsl_iowrite64(v, p)	iowrite64(v, p)
+#define fsl_iowrite64be(v, p)	iowrite64be(v, p)
+#endif
+
+#define FSL_DMA_IN(fsl_dma, addr, width)				\
+		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?		\
+			fsl_ioread##width##be(addr) : fsl_ioread##width(addr))
+
+#define FSL_DMA_OUT(fsl_dma, addr, val, width)				\
+		(((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ?		\
+			fsl_iowrite##width##be(val, addr) : fsl_iowrite	\
+		##width(val, addr))
 
 #define DMA_TO_CPU(fsl_chan, d, width)					\
 		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-14 12:03 ` [v4 1/6] " Wen He
@ 2018-05-14 12:03 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

The NXP Queue DMA controller (qDMA) on Layerscape SoCs supports channel
virtualization by allowing DMA jobs to be enqueued into different
command queues.

Note that this module depends on NXP DPAA.

Signed-off-by: Wen He <wen.he_1@nxp.com>
Signed-off-by: Jiaheng Fan <jiaheng.fan@nxp.com>
---
change in v4:
	- Fixed the issues that Vinod point out in the mail list.

change in v3:
	- Add 'depends on ARM || ARM64' in file 'drivers/dma/Kconfig'

change in v2:
	- Replace GPL V2 License details by SPDX tags
	- Replace Freescale by NXP
	- Reduce and optimize header file references
	- Replace big_endian by feature in struct fsl_qdma_engine
	- Replace struct fsl_qdma_format by struct fsl_qdma_ccdf
	  and struct fsl_qdma_csgf
	- Remove empty line
	- Replace 'if..else' by macro 'FSL_QDMA_IN/OUT' in function
	  qdma_readl() and qdma_writel()
	- Remove function fsl_qdma_alloc_chan_resources()
	- Replace 'prei' by 'pre'
	- Replace '-1' by '-ENOMEM' in function fsl_qdma_pre_request_enqueue_desc()
	- Fix dma pool allocation need to rolled back in function
	  fsl_qdma_request_enqueue_desc()
	- Replace function of_property_read_u32_array() by device_property_read_u32_array()
	- Add functions fsl_qdma_cleanup_vchan() and fsl_qdma_irq_exit() to ensure
	  irq and tasklets stopped
	- Replace dts node element 'channels' by 'dma-channels'
	- Replace function platform_driver_register() by module_platform_driver()

 drivers/dma/Kconfig    |   13 +
 drivers/dma/Makefile   |    1 +
 drivers/dma/fsl-qdma.c | 1120 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1134 insertions(+), 0 deletions(-)
 create mode 100644 drivers/dma/fsl-qdma.c

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6d61cd0..99aff33 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -225,6 +225,19 @@ config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+       tristate "NXP Layerscape qDMA engine support"
+       depends on ARM || ARM64
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select DMA_ENGINE_RAID
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+       help
+         Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+         Channel virtualization is supported through enqueuing of DMA jobs to,
+         or dequeuing DMA jobs from, different work queues.
+         This module can be found on NXP Layerscape SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0f62a4d..93db0fc 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 0000000..4341b86
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2018 NXP
+
+/*
+ * Driver for NXP Layerscape Queue Direct Memory Access Controller
+ *
+ * Author:
+ *  Wen He <wen.he_1@nxp.com>
+ *  Jiaheng Fan <jiaheng.fan@nxp.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include "fsldma.h"
+
+/* Register related definition */
+#define FSL_QDMA_DMR			0x0
+#define FSL_QDMA_DSR			0x4
+#define FSL_QDMA_DEIER			0xe00
+#define FSL_QDMA_DEDR			0xe04
+#define FSL_QDMA_DECFDW0R		0xe10
+#define FSL_QDMA_DECFDW1R		0xe14
+#define FSL_QDMA_DECFDW2R		0xe18
+#define FSL_QDMA_DECFDW3R		0xe1c
+#define FSL_QDMA_DECFQIDR		0xe30
+#define FSL_QDMA_DECBR			0xe34
+
+#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQDPAR			0x80c
+#define FSL_QDMA_SQEPAR			0x814
+#define FSL_QDMA_BSQMR			0x800
+#define FSL_QDMA_BSQSR			0x804
+#define FSL_QDMA_BSQICR			0x828
+#define FSL_QDMA_CQMR			0xa00
+#define FSL_QDMA_CQDSCR1		0xa08
+#define FSL_QDMA_CQDSCR2                0xa0c
+#define FSL_QDMA_CQIER			0xa10
+#define FSL_QDMA_CQEDR			0xa14
+#define FSL_QDMA_SQCCMR			0xa20
+
+/* Registers for bit and genmask */
+#define FSL_QDMA_CQIDR_SQT		0x8000
+#define QDMA_CCDF_MASK			GENMASK(28, 20)
+#define QDMA_CCDF_FOTMAT		BIT(29)
+#define QDMA_CCDF_SER			BIT(30)
+#define QDMA_SG_FIN			BIT(30)
+#define QDMA_SG_EXT			BIT(31)
+#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
+
+#define QDMA_CCDF_STATUS		20
+#define QDMA_CCDF_OFFSET		20
+#define FSL_QDMA_BCQIER_CQTIE		0x8000
+#define FSL_QDMA_BCQIER_CQPEIE		0x800000
+#define FSL_QDMA_BSQICR_ICEN		0x80000000
+#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
+#define FSL_QDMA_CQIER_MEIE		0x80000000
+#define FSL_QDMA_CQIER_TEIE		0x1
+#define FSL_QDMA_SQCCMR_ENTER_WM	0x200000
+
+#define FSL_QDMA_QUEUE_MAX		8
+
+#define FSL_QDMA_BCQMR_EN		0x80000000
+#define FSL_QDMA_BCQMR_EI		0x40000000
+#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF		0x10000
+#define FSL_QDMA_BCQSR_XOFF		0x1
+
+#define FSL_QDMA_BSQMR_EN		0x80000000
+#define FSL_QDMA_BSQMR_DI		0x40000000
+#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE		0x20000
+
+#define FSL_QDMA_DMR_DQD		0x40000000
+#define FSL_QDMA_DSR_DB			0x80000000
+
+/* Size related definition */
+#define FSL_QDMA_BASE_BUFFER_SIZE	96
+#define FSL_QDMA_EXPECT_SG_ENTRY_NUM	16
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
+#define FSL_QDMA_QUEUE_NUM_MAX		8
+
+/* Field definition for CMD */
+#define FSL_QDMA_CMD_RWTTYPE		0x4
+#define FSL_QDMA_CMD_LWC                0x2
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
+#define FSL_QDMA_CMD_NS_OFFSET		27
+#define FSL_QDMA_CMD_DQOS_OFFSET	24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
+#define FSL_QDMA_CMD_DSEN_OFFSET	19
+#define FSL_QDMA_CMD_LWC_OFFSET		16
+
+#define FSL_QDMA_E_SG_TABLE		1
+#define FSL_QDMA_E_DATA_BUFFER		0
+#define FSL_QDMA_F_LAST_ENTRY		1
+
+u64 pre_addr, pre_queue;
+
+/* qDMA Command Descriptor Fotmats */
+
+struct fsl_qdma_format {
+	__le32 status; /* ser, status */
+	__le32 cfg;	/* format, offset */
+	union {
+		struct {
+			__le32 addr_lo;	/* low 32-bits of 40-bit address */
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u8 __reserved1[2];
+			u8 cfg8b_w1; /* dd, queue */
+		} __packed;
+		__le64 data;
+	};
+} __packed;
+
+/* qDMA Source Descriptor Format */
+struct fsl_qdma_sdf {
+	__le32 rev3;
+	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	__le32 rev5;
+	__le32 cmd;
+} __packed;
+
+/* qDMA Destination Descriptor Format */
+struct fsl_qdma_ddf {
+	__le32 rev1;
+	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	__le32 rev3;
+	__le32 cmd;
+} __packed;
+
+struct fsl_qdma_chan {
+	struct virt_dma_chan		vchan;
+	struct virt_dma_desc		vdesc;
+	enum dma_status			status;
+	u32				slave_id;
+	struct fsl_qdma_engine		*qdma;
+	struct fsl_qdma_queue		*queue;
+	struct list_head		qcomp;
+};
+
+struct fsl_qdma_queue {
+	struct fsl_qdma_format	*virt_head;
+	struct fsl_qdma_format	*virt_tail;
+	struct list_head	comp_used;
+	struct list_head	comp_free;
+	struct dma_pool		*comp_pool;
+	struct dma_pool		*sg_pool;
+	spinlock_t		queue_lock;
+	dma_addr_t		bus_addr;
+	u32                     n_cq;
+	u32			id;
+	struct fsl_qdma_format	*cq;
+};
+
+struct fsl_qdma_sg {
+	dma_addr_t		bus_addr;
+	void			*virt_addr;
+};
+
+struct fsl_qdma_comp {
+	dma_addr_t              bus_addr;
+	void			*virt_addr;
+	struct fsl_qdma_chan	*qchan;
+	struct fsl_qdma_sg	*sg_block;
+	struct virt_dma_desc    vdesc;
+	struct list_head	list;
+	u32			sg_block_src;
+	u32			sg_block_dst;
+};
+
+struct fsl_qdma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*ctrl_base;
+	void __iomem            *status_base;
+	void __iomem		*block_base;
+	u32			n_chans;
+	u32			n_queues;
+	struct mutex            fsl_qdma_mutex;
+	int			error_irq;
+	int			queue_irq;
+	bool			feature;
+	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	*status;
+	struct fsl_qdma_chan	chans[];
+
+};
+
+static inline u64
+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
+{
+	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
+}
+
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+	ccdf->addr_hi = upper_32_bits(addr);
+	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+
+static inline u64
+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+{
+	return ccdf->cfg8b_w1 & U8_MAX;
+}
+
+static inline int
+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+
+static inline void
+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
+{
+	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
+}
+
+static inline int
+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+
+static inline void
+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+{
+	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+
+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+
+static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
+}
+
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+	return FSL_DMA_IN(qdma, addr, 32);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+						void __iomem *addr)
+{
+	FSL_DMA_OUT(qdma, addr, val, 32);
+}
+
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+}
+
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+					dma_addr_t dst, dma_addr_t src, u32 len)
+{
+	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
+	struct fsl_qdma_sdf *sdf;
+	struct fsl_qdma_ddf *ddf;
+
+	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
+	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
+	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
+	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
+	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
+
+	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
+	/* Head Command Descriptor(Frame Descriptor) */
+	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+
+	/* Status notification is enqueued to status queue. */
+	/* Compound Command Descriptor(Frame List Table) */
+	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
+	/* It must be 32 as Compound S/G Descriptor */
+	qdma_csgf_set_len(csgf_desc, 32);
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
+	/* Descriptor Buffer */
+	sdf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd |= cpu_to_le32(
+			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
+
+/*
+ * Pre-request full command descriptor for enqueue.
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+	struct fsl_qdma_comp *comp_temp;
+	int i;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return -ENOMEM;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr)
+			return -ENOMEM;
+		list_add_tail(&comp_temp->list, &queue->comp_free);
+	}
+
+	return 0;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
+					struct fsl_qdma_chan *fsl_chan,
+					unsigned int dst_nents,
+					unsigned int src_nents)
+{
+	struct fsl_qdma_comp *comp_temp;
+	struct fsl_qdma_sg *sg_block;
+	struct fsl_qdma_queue *queue = fsl_chan->queue;
+	unsigned long flags;
+	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
+
+	spin_lock_irqsave(&queue->queue_lock, flags);
+	if (list_empty(&queue->comp_free)) {
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return NULL;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			kfree(comp_temp);
+			return NULL;
+		}
+
+	} else {
+		comp_temp = list_first_entry(&queue->comp_free,
+					     struct fsl_qdma_comp,
+					     list);
+		list_del(&comp_temp->list);
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+	}
+
+	if (dst_nents != 0)
+		dst_sg_entry_block = dst_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		dst_sg_entry_block = 0;
+
+	if (src_nents != 0)
+		src_sg_entry_block = src_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		src_sg_entry_block = 0;
+
+	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
+	if (sg_entry_total) {
+		sg_block = kzalloc(sizeof(*sg_block) *
+					      sg_entry_total,
+					      GFP_KERNEL);
+		if (!sg_block) {
+			dma_pool_free(queue->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			return NULL;
+		}
+		comp_temp->sg_block = sg_block;
+		for (i = 0; i < sg_entry_total; i++) {
+			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
+							GFP_KERNEL,
+							&sg_block->bus_addr);
+			memset(sg_block->virt_addr, 0,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
+			sg_block++;
+		}
+	}
+
+	comp_temp->sg_block_src = src_sg_entry_block;
+	comp_temp->sg_block_dst = dst_sg_entry_block;
+	comp_temp->qchan = fsl_chan;
+
+	return comp_temp;
+}
+
+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
+					struct platform_device *pdev,
+					unsigned int queue_num)
+{
+	struct fsl_qdma_queue *queue_head, *queue_temp;
+	int ret, len, i;
+	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+
+	if (queue_num > FSL_QDMA_QUEUE_MAX)
+		queue_num = FSL_QDMA_QUEUE_MAX;
+	len = sizeof(*queue_head) * queue_num;
+	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!queue_head)
+		return NULL;
+
+	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
+					queue_size, queue_num);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+		return NULL;
+	}
+
+	for (i = 0; i < queue_num; i++) {
+		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
+			return NULL;
+		}
+		queue_temp = queue_head + i;
+		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						queue_size[i],
+						&queue_temp->bus_addr,
+						GFP_KERNEL);
+		if (!queue_temp->cq)
+			return NULL;
+		queue_temp->n_cq = queue_size[i];
+		queue_temp->id = i;
+		queue_temp->virt_head = queue_temp->cq;
+		queue_temp->virt_tail = queue_temp->cq;
+		/*
+		 * The dma pool for queue command buffer
+		 */
+		queue_temp->comp_pool = dma_pool_create("comp_pool",
+						&pdev->dev,
+						FSL_QDMA_BASE_BUFFER_SIZE,
+						16, 0);
+		if (!queue_temp->comp_pool)
+			goto err_free_comp;
+
+		/*
+		 * The dma pool for queue command buffer
+		 */
+		queue_temp->sg_pool = dma_pool_create("sg_pool",
+					&pdev->dev,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
+					64, 0);
+		if (!queue_temp->sg_pool)
+			goto err_free_comp;
+
+		/*
+		 * List for queue command buffer
+		 */
+		INIT_LIST_HEAD(&queue_temp->comp_used);
+		INIT_LIST_HEAD(&queue_temp->comp_free);
+		spin_lock_init(&queue_temp->queue_lock);
+	}
+
+	return queue_head;
+
+err_free_comp:
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct fsl_qdma_format) *
+			queue_size[i],
+			queue_temp->cq,
+			queue_temp->bus_addr);
+	return NULL;
+}
+
+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
+						struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_queue *status_head;
+	unsigned int status_size;
+	int ret;
+
+	ret = of_property_read_u32(np, "status-sizes", &status_size);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get status-sizes.\n");
+		return NULL;
+	}
+	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+		dev_err(&pdev->dev, "Get wrong status_size.\n");
+		return NULL;
+	}
+	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
+								GFP_KERNEL);
+	if (!status_head)
+		return NULL;
+
+	/*
+	 * Buffer for queue command
+	 */
+	status_head->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						status_size,
+						&status_head->bus_addr,
+						GFP_KERNEL);
+	if (!status_head->cq)
+		return NULL;
+
+	status_head->n_cq = status_size;
+	status_head->virt_head = status_head->cq;
+	status_head->virt_tail = status_head->cq;
+	status_head->comp_pool = NULL;
+
+	return status_head;
+}
+
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, count = 5;
+	u32 reg;
+
+	/* Disable the command queue and wait for idle state. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg |= FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+
+	while (1) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+		if (!(reg & FSL_QDMA_DSR_DB))
+			break;
+		if (count-- < 0)
+			return -EBUSY;
+		udelay(100);
+	}
+
+	/* Disable status queue. */
+	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return 0;
+}
+
+/*
+ * Drain the status (completion) queue, completing the matching command
+ * descriptor for every entry consumed. Returns 0 once hardware reports
+ * the status queue empty, or -1 when an entry cannot be matched to the
+ * oldest in-flight descriptor.
+ *
+ * Fixes vs. v3 review: removed the set-but-never-read 'csgf_src' local
+ * and repaired the mis-indented 'duplicate_handle' block.
+ */
+static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
+	struct fsl_qdma_queue *temp_queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_format *status_addr;
+	void __iomem *block = fsl_qdma->block_base;
+	u32 reg, i;
+	bool duplicate, duplicate_handle;
+
+	while (1) {
+		duplicate = 0;
+		duplicate_handle = 0;
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE)
+			return 0;
+		status_addr = fsl_status->virt_head;
+
+		/*
+		 * An entry identical to the previous one (same queue and
+		 * same completion address) is treated as a duplicate
+		 * notification: acknowledged, but completes nothing.
+		 */
+		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
+			qdma_ccdf_addr_get64(status_addr) == pre_addr)
+			duplicate = 1;
+		i = qdma_ccdf_get_queue(status_addr);
+		pre_queue = qdma_ccdf_get_queue(status_addr);
+		pre_addr = qdma_ccdf_addr_get64(status_addr);
+		temp_queue = fsl_queue + i;
+		spin_lock(&temp_queue->queue_lock);
+		if (list_empty(&temp_queue->comp_used)) {
+			if (duplicate) {
+				duplicate_handle = 1;
+			} else {
+				spin_unlock(&temp_queue->queue_lock);
+				return -1;
+			}
+		} else {
+			fsl_comp = list_first_entry(&temp_queue->comp_used,
+							struct fsl_qdma_comp,
+							list);
+			/*
+			 * The reported address points 16 bytes into the
+			 * descriptor buffer (the compound frame list, see
+			 * fsl_qdma_comp_fill_memcpy()); a mismatch means
+			 * this entry is not for the oldest descriptor.
+			 */
+			if (fsl_comp->bus_addr + 16 != pre_addr) {
+				if (duplicate) {
+					duplicate_handle = 1;
+				} else {
+					spin_unlock(&temp_queue->queue_lock);
+					return -1;
+				}
+			}
+		}
+
+		if (duplicate_handle) {
+			/* Acknowledge the entry without completing anything. */
+			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+			reg |= FSL_QDMA_BSQMR_DI;
+			qdma_desc_addr_set64(status_addr, 0x0);
+			fsl_status->virt_head++;
+			if (fsl_status->virt_head == fsl_status->cq
+						   + fsl_status->n_cq)
+				fsl_status->virt_head = fsl_status->cq;
+			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+			spin_unlock(&temp_queue->queue_lock);
+			continue;
+		}
+		list_del(&fsl_comp->list);
+
+		/* Consume the status entry and advance the ring head. */
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+		reg |= FSL_QDMA_BSQMR_DI;
+		qdma_desc_addr_set64(status_addr, 0x0);
+		fsl_status->virt_head++;
+		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+			fsl_status->virt_head = fsl_status->cq;
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+		spin_unlock(&temp_queue->queue_lock);
+
+		/* Report completion to the dmaengine core. */
+		spin_lock(&fsl_comp->qchan->vchan.lock);
+		vchan_cookie_complete(&fsl_comp->vdesc);
+		fsl_comp->qchan->status = DMA_COMPLETE;
+		spin_unlock(&fsl_comp->qchan->vchan.lock);
+	}
+	return 0;
+}
+
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr;
+	void __iomem *status = fsl_qdma->status_base;
+
+	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+	if (intr)
+		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Queue (completion) interrupt: drain the status queue via
+ * fsl_qdma_queue_transfer_complete(), then acknowledge the interrupt.
+ * On a drain failure the engine is stopped (DQD) and further queue
+ * interrupts are masked.
+ */
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr, reg;
+	void __iomem *block = fsl_qdma->block_base;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+
+	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+	/* SQT set: status-queue threshold reached; intr becomes 0 or -1. */
+	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
+
+	/* Non-zero here means the drain failed: quiesce the engine. */
+	if (intr != 0) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+		reg |= FSL_QDMA_DMR_DQD;
+		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+	}
+
+	/* Clear all command-queue interrupt-detect bits. */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up and request the "qdma-error" and "qdma-queue" interrupt lines.
+ * Uses devm_request_irq(), so the lines are released automatically on
+ * probe failure / driver detach (fsl_qdma_irq_exit() frees them early).
+ * Returns 0 or a negative errno.
+ */
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+		  struct fsl_qdma_engine *fsl_qdma)
+{
+	int ret;
+
+	fsl_qdma->error_irq = platform_get_irq_byname(pdev, "qdma-error");
+	if (fsl_qdma->error_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+		return fsl_qdma->error_irq;
+	}
+
+	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
+	if (fsl_qdma->queue_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
+		return fsl_qdma->queue_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
+			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Free the requested interrupt lines ahead of devm teardown, so no
+ * handler can run while remove() frees descriptors. Freeing the same
+ * irq/dev_id pair twice would be a bug, hence the equality check —
+ * the original if/else freed queue_irq in both branches.
+ */
+static void fsl_qdma_irq_exit(
+		struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
+{
+	devm_free_irq(&pdev->dev, fsl_qdma->queue_irq, fsl_qdma);
+	if (fsl_qdma->queue_irq != fsl_qdma->error_irq)
+		devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+}
+
+/*
+ * Program all qDMA registers from scratch: halt the engine, point every
+ * command queue and the status queue at their rings, enable interrupts,
+ * and finally clear DQD to start dequeuing. The register write order
+ * below is deliberate — do not reorder.
+ */
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *temp;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *status = fsl_qdma->status_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, ret;
+	u32 reg;
+
+	/* Try to halt the qDMA engine first. */
+	ret = fsl_qdma_halt(fsl_qdma);
+	if (ret) {
+		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
+		return ret;
+	}
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		temp = fsl_queue + i;
+		/*
+		 * Initialize Command Queue registers to point to the first
+		 * command descriptor in memory.
+		 * Dequeue Pointer Address Registers
+		 * Enqueue Pointer Address Registers
+		 */
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQDPA_SADDR(i));
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQEPA_SADDR(i));
+
+		/*
+		 * Initialize the queue mode. Threshold and size fields are
+		 * log2-encoded (ilog2(n)-4 / ilog2(n)-6) — presumably per
+		 * the reference manual; confirm against the RM.
+		 */
+		reg = FSL_QDMA_BCQMR_EN;
+		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
+		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+	}
+
+	/*
+	 * Workaround for erratum: ERR010812.
+	 * We must enable XOFF to avoid the enqueue rejection occurs.
+	 * Setting SQCCMR ENTER_WM to 0x20.
+	 */
+	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+			      block + FSL_QDMA_SQCCMR);
+	/*
+	 * Initialize status queue registers to point to the first
+	 * command descriptor in memory.
+	 * Dequeue Pointer Address Registers
+	 * Enqueue Pointer Address Registers
+	 */
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQEPAR);
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQDPAR);
+	/* Initialize status queue interrupt. */
+	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+			      block + FSL_QDMA_BCQIER(0));
+	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
+						   | 0x8000,
+			      block + FSL_QDMA_BSQICR);
+	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
+			      block + FSL_QDMA_CQIER);
+	/* Initialize controller interrupt register. */
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
+
+	/* Initialize the status queue mode. */
+	reg = FSL_QDMA_BSQMR_EN;
+	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
+	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+
+	/* Clearing DQD lets the engine start dequeuing commands. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg &= ~FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_prep_dma_memcpy callback: obtain a command
+ * descriptor, fill it for a dst<-src copy of @len bytes, and hand it
+ * to the virt-dma layer. Returns NULL on allocation failure — the
+ * original dereferenced a NULL descriptor when the free list was empty
+ * and allocation failed.
+ */
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+		dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_comp *fsl_comp;
+
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
+
+	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
+/*
+ * Push the next issued descriptor into the channel's hardware command
+ * queue. Caller must hold fsl_queue->queue_lock and the vchan lock
+ * (see fsl_qdma_issue_pending()).
+ */
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *block = fsl_chan->qdma->block_base;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct virt_dma_desc *vdesc;
+	u32 reg;
+
+	/* Bail out while the queue is full (QF) or flow-controlled (XOFF). */
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+		return;
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	list_del(&vdesc->node);
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+
+	/* Copy only the 16-byte CCDF head into the ring slot, then wrap. */
+	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+		fsl_queue->virt_head = fsl_queue->cq;
+
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+	/*
+	 * Order the descriptor write before the enqueue-increment below.
+	 * NOTE(review): barrier() is only a compiler barrier; a dma_wmb()
+	 * may be required here — confirm against the memory model.
+	 */
+	barrier();
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	reg |= FSL_QDMA_BCQMR_EI;
+	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+/*
+ * dmaengine device_tx_status callback. The driver keeps no residue
+ * information, so this is exactly the generic cookie lookup: the
+ * original's 'if (ret == DMA_COMPLETE || !txstate)' branch returned
+ * the same value as the fall-through path (and left an unused local).
+ */
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * virt-dma desc_free callback: return any scatter-gather blocks to
+ * their dma pool, then recycle the command descriptor onto the owning
+ * queue's free list for reuse.
+ */
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_comp *comp = to_fsl_qdma_comp(vdesc);
+	struct fsl_qdma_queue *queue = comp->qchan->queue;
+	struct fsl_qdma_sg *blk;
+	unsigned long irqflags;
+	unsigned int idx, nblocks;
+
+	if (comp->sg_block) {
+		nblocks = comp->sg_block_src + comp->sg_block_dst;
+		for (idx = 0; idx < nblocks; idx++) {
+			blk = &comp->sg_block[idx];
+			dma_pool_free(queue->sg_pool,
+				      blk->virt_addr,
+				      blk->bus_addr);
+		}
+		kfree(comp->sg_block);
+	}
+
+	spin_lock_irqsave(&queue->queue_lock, irqflags);
+	list_add_tail(&comp->list, &queue->comp_free);
+	spin_unlock_irqrestore(&queue->queue_lock, irqflags);
+}
+
+/*
+ * dmaengine device_issue_pending callback: move cookied descriptors to
+ * the issued list and try to push one into the hardware queue. The
+ * vchan lock is nested inside the per-queue lock.
+ */
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *qchan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_queue *queue = qchan->queue;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&queue->queue_lock, irqflags);
+	spin_lock(&qchan->vchan.lock);
+	if (vchan_issue_pending(&qchan->vchan))
+		fsl_qdma_enqueue_desc(qchan);
+	spin_unlock(&qchan->vchan.lock);
+	spin_unlock_irqrestore(&queue->queue_lock, irqflags);
+}
+
+/*
+ * Probe: read DT configuration, allocate queue/status rings, map the
+ * three register regions (ctrl, status, block), request interrupts,
+ * register with the dmaengine core and program the hardware.
+ *
+ * Fixes vs. v4: removed the outer 'fsl_chan' shadowed by the loop-local
+ * declaration; dma_set_mask() and fsl_qdma_pre_request_enqueue_desc()
+ * results are now checked; the device is unregistered again if
+ * fsl_qdma_reg_init() fails.
+ */
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct resource *res;
+	unsigned int len, chans, queues;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "dma-channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get dma-channels.\n");
+		return ret;
+	}
+
+	/* Engine struct plus the trailing flexible array of channels. */
+	len = sizeof(*fsl_qdma) + sizeof(struct fsl_qdma_chan) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "queues", &queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queues.\n");
+		return ret;
+	}
+
+	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
+	if (!fsl_qdma->queue)
+		return -ENOMEM;
+
+	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
+	if (!fsl_qdma->status)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	fsl_qdma->n_queues = queues;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	/* Register regions: ctrl, status, block — in that order. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->ctrl_base))
+		return PTR_ERR(fsl_qdma->ctrl_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->status_base))
+		return PTR_ERR(fsl_qdma->status_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->block_base))
+		return PTR_ERR(fsl_qdma->block_base);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	/* "big-endian" selects the BE register accessors (FSL_DMA_IN/OUT). */
+	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		/* Channels share the command queues round-robin. */
+		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		INIT_LIST_HEAD(&fsl_chan->qcomp);
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	/* The engine produces 40-bit bus addresses. */
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret) {
+		dev_err(&pdev->dev, "Can't set dma mask.\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+		dma_async_device_unregister(&fsl_qdma->dma_dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Unhook every channel from the dma device and stop its tasklet. */
+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct fsl_qdma_chan *cur, *next;
+
+	list_for_each_entry_safe(cur, next, &dmadev->channels,
+				 vchan.chan.device_node) {
+		list_del(&cur->vchan.chan.device_node);
+		tasklet_kill(&cur->vchan.task);
+	}
+}
+
+/* Release every completion descriptor on @list back to the system. */
+static void fsl_qdma_free_comp_list(struct fsl_qdma_queue *queue,
+				    struct list_head *list)
+{
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+
+	list_for_each_entry_safe(comp_temp, _comp_temp, list, list) {
+		dma_pool_free(queue->comp_pool,
+				comp_temp->virt_addr,
+				comp_temp->bus_addr);
+		list_del(&comp_temp->list);
+		kfree(comp_temp);
+	}
+}
+
+/*
+ * Remove: quiesce interrupts and tasklets, unregister from the
+ * dmaengine core, then free all descriptor memory. The duplicated
+ * used/free list loops are factored into fsl_qdma_free_comp_list(),
+ * and the previously leaked sg_pool is now destroyed.
+ */
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *status = fsl_qdma->status;
+	int i;
+
+	fsl_qdma_irq_exit(pdev, fsl_qdma);
+	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
+	/*
+	 * NOTE(review): probe() never calls of_dma_controller_register(),
+	 * so this free looks unbalanced — harmless today, but verify.
+	 */
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+	/* Free descriptor areas */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		queue_temp = fsl_qdma->queue + i;
+		fsl_qdma_free_comp_list(queue_temp, &queue_temp->comp_used);
+		fsl_qdma_free_comp_list(queue_temp, &queue_temp->comp_free);
+		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+					queue_temp->n_cq, queue_temp->cq,
+					queue_temp->bus_addr);
+		dma_pool_destroy(queue_temp->comp_pool);
+		dma_pool_destroy(queue_temp->sg_pool);
+	}
+
+	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+				status->n_cq, status->cq, status->bus_addr);
+	return 0;
+}
+
+/* Device-tree match table: LS1021A-compatible qDMA blocks. */
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls1021a-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver		= {
+		.name	= "fsl-qdma",
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe          = fsl_qdma_probe,
+	.remove		= fsl_qdma_remove,
+};
+
+module_platform_driver(fsl_qdma_driver);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
+MODULE_LICENSE("GPL v2");

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

NXP Queue DMA controller (qDMA) on Layerscape SoCs supports channel
virtualization by allowing DMA jobs to be enqueued into different
command queues.

Note that this module depends on NXP DPAA.

Signed-off-by: Wen He <wen.he_1@nxp.com>
Signed-off-by: Jiaheng Fan <jiaheng.fan@nxp.com>
---
change in v4:
	- Fixed the issues that Vinod point out in the mail list.

change in v3:
	- Add 'depends on ARM || ARM64' in file 'drivers/dma/Kconfig'

change in v2:
	- Replace GPL V2 License details by SPDX tags
	- Replace Freescale by NXP
	- Reduce and optimize header file references
	- Replace big_endian by feature in struct fsl_qdma_engine
	- Replace struct fsl_qdma_format by struct fsl_qdma_ccdf
	  and struct fsl_qdma_csgf
	- Remove empty line
	- Replace 'if..else' by macro 'FSL_QDMA_IN/OUT' in function
	  qdma_readl() and qdma_writel()
	- Remove function fsl_qdma_alloc_chan_resources()
	- Replace 'prei' by 'pre'
	- Replace '-1' by '-ENOMEM' in function fsl_qdma_pre_request_enqueue_desc()
	- Fix dma pool allocation need to rolled back in function
	  fsl_qdma_request_enqueue_desc()
	- Replace function of_property_read_u32_array() by device_property_read_u32_array()
	- Add functions fsl_qdma_cleanup_vchan() and fsl_qdma_irq_exit() to ensure
	  irq and tasklets stopped
	- Replace dts node element 'channels' by 'dma-channels'
	- Replace function platform_driver_register() by module_platform_driver()

 drivers/dma/Kconfig    |   13 +
 drivers/dma/Makefile   |    1 +
 drivers/dma/fsl-qdma.c | 1120 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1134 insertions(+), 0 deletions(-)
 create mode 100644 drivers/dma/fsl-qdma.c

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6d61cd0..99aff33 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -225,6 +225,19 @@ config FSL_EDMA
 	  multiplexing capability for DMA request sources(slot).
 	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
+config FSL_QDMA
+       tristate "NXP Layerscape qDMA engine support"
+       depends on ARM || ARM64
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select DMA_ENGINE_RAID
+       select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+       help
+         Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+         Channel virtualization is supported through enqueuing of DMA jobs to,
+         or dequeuing DMA jobs from, different work queues.
+         This module can be found on NXP Layerscape SoCs.
+
 config FSL_RAID
         tristate "Freescale RAID engine Support"
         depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0f62a4d..93db0fc 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
new file mode 100644
index 0000000..4341b86
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2018 NXP
+
+/*
+ * Driver for NXP Layerscape Queue Direct Memory Access Controller
+ *
+ * Author:
+ *  Wen He <wen.he_1@nxp.com>
+ *  Jiaheng Fan <jiaheng.fan@nxp.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include "fsldma.h"
+
+/* Register related definition */
+#define FSL_QDMA_DMR			0x0
+#define FSL_QDMA_DSR			0x4
+#define FSL_QDMA_DEIER			0xe00
+#define FSL_QDMA_DEDR			0xe04
+#define FSL_QDMA_DECFDW0R		0xe10
+#define FSL_QDMA_DECFDW1R		0xe14
+#define FSL_QDMA_DECFDW2R		0xe18
+#define FSL_QDMA_DECFDW3R		0xe1c
+#define FSL_QDMA_DECFQIDR		0xe30
+#define FSL_QDMA_DECBR			0xe34
+
+/* Per-command-queue registers, one 0x100 block per queue. */
+#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
+
+/* Status-queue and controller-queue registers. */
+#define FSL_QDMA_SQDPAR			0x80c
+#define FSL_QDMA_SQEPAR			0x814
+#define FSL_QDMA_BSQMR			0x800
+#define FSL_QDMA_BSQSR			0x804
+#define FSL_QDMA_BSQICR			0x828
+#define FSL_QDMA_CQMR			0xa00
+#define FSL_QDMA_CQDSCR1		0xa08
+#define FSL_QDMA_CQDSCR2                0xa0c
+#define FSL_QDMA_CQIER			0xa10
+#define FSL_QDMA_CQEDR			0xa14
+#define FSL_QDMA_SQCCMR			0xa20
+
+/* Registers for bit and genmask */
+#define FSL_QDMA_CQIDR_SQT		0x8000
+/* NB: "FOTMAT" is a typo for FORMAT; the name is used below, kept as-is. */
+#define QDMA_CCDF_MASK			GENMASK(28, 20)
+#define QDMA_CCDF_FOTMAT		BIT(29)
+#define QDMA_CCDF_SER			BIT(30)
+#define QDMA_SG_FIN			BIT(30)
+#define QDMA_SG_EXT			BIT(31)
+#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
+
+#define QDMA_CCDF_STATUS		20
+#define QDMA_CCDF_OFFSET		20
+#define FSL_QDMA_BCQIER_CQTIE		0x8000
+#define FSL_QDMA_BCQIER_CQPEIE		0x800000
+#define FSL_QDMA_BSQICR_ICEN		0x80000000
+#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
+#define FSL_QDMA_CQIER_MEIE		0x80000000
+#define FSL_QDMA_CQIER_TEIE		0x1
+#define FSL_QDMA_SQCCMR_ENTER_WM	0x200000
+
+#define FSL_QDMA_QUEUE_MAX		8
+
+#define FSL_QDMA_BCQMR_EN		0x80000000
+#define FSL_QDMA_BCQMR_EI		0x40000000
+#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF		0x10000
+#define FSL_QDMA_BCQSR_XOFF		0x1
+
+#define FSL_QDMA_BSQMR_EN		0x80000000
+#define FSL_QDMA_BSQMR_DI		0x40000000
+#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE		0x20000
+
+#define FSL_QDMA_DMR_DQD		0x40000000
+#define FSL_QDMA_DSR_DB			0x80000000
+
+/* Size related definition */
+#define FSL_QDMA_BASE_BUFFER_SIZE	96
+#define FSL_QDMA_EXPECT_SG_ENTRY_NUM	16
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
+#define FSL_QDMA_QUEUE_NUM_MAX		8
+
+/* Field definition for CMD */
+#define FSL_QDMA_CMD_RWTTYPE		0x4
+#define FSL_QDMA_CMD_LWC                0x2
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
+#define FSL_QDMA_CMD_NS_OFFSET		27
+#define FSL_QDMA_CMD_DQOS_OFFSET	24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
+#define FSL_QDMA_CMD_DSEN_OFFSET	19
+#define FSL_QDMA_CMD_LWC_OFFSET		16
+
+#define FSL_QDMA_E_SG_TABLE		1
+#define FSL_QDMA_E_DATA_BUFFER		0
+#define FSL_QDMA_F_LAST_ENTRY		1
+
+/*
+ * Last status-queue entry seen by fsl_qdma_queue_transfer_complete(),
+ * used to detect duplicate completion notifications.
+ * NOTE(review): file-scope and not per-device — breaks with more than
+ * one qDMA instance; consider moving into struct fsl_qdma_engine.
+ */
+u64 pre_addr, pre_queue;
+
+/* qDMA Command Descriptor Formats */
+
+/*
+ * One 16-byte descriptor slot: used both as the CCDF head and as a
+ * compound S/G entry. The union overlays a 40-bit address (addr_lo +
+ * addr_hi) with the raw 64-bit view used by the get/set helpers.
+ */
+struct fsl_qdma_format {
+	__le32 status; /* ser, status */
+	__le32 cfg;	/* format, offset */
+	union {
+		struct {
+			__le32 addr_lo;	/* low 32-bits of 40-bit address */
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u8 __reserved1[2];
+			u8 cfg8b_w1; /* dd, queue */
+		} __packed;
+		__le64 data;
+	};
+} __packed;
+
+/* qDMA Source Descriptor Format */
+struct fsl_qdma_sdf {
+	__le32 rev3;
+	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	__le32 rev5;
+	__le32 cmd;
+} __packed;
+
+/* qDMA Destination Descriptor Format */
+struct fsl_qdma_ddf {
+	__le32 rev1;
+	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	__le32 rev3;
+	__le32 cmd;
+} __packed;
+
+/* One virtual dmaengine channel, bound to a shared hardware queue. */
+struct fsl_qdma_chan {
+	struct virt_dma_chan		vchan;
+	struct virt_dma_desc		vdesc;
+	enum dma_status			status;
+	u32				slave_id;
+	struct fsl_qdma_engine		*qdma;
+	struct fsl_qdma_queue		*queue;
+	struct list_head		qcomp;
+};
+
+/*
+ * A circular ring of fsl_qdma_format slots, used both for the
+ * per-queue command rings and for the single status ring.
+ */
+struct fsl_qdma_queue {
+	struct fsl_qdma_format	*virt_head;
+	struct fsl_qdma_format	*virt_tail;
+	struct list_head	comp_used;
+	struct list_head	comp_free;
+	struct dma_pool		*comp_pool;
+	struct dma_pool		*sg_pool;
+	spinlock_t		queue_lock;
+	dma_addr_t		bus_addr;
+	u32                     n_cq;
+	u32			id;
+	struct fsl_qdma_format	*cq;
+};
+
+/* One scatter-gather extension block allocated from sg_pool. */
+struct fsl_qdma_sg {
+	dma_addr_t		bus_addr;
+	void			*virt_addr;
+};
+
+/* A command descriptor (comp_pool buffer) plus its bookkeeping. */
+struct fsl_qdma_comp {
+	dma_addr_t              bus_addr;
+	void			*virt_addr;
+	struct fsl_qdma_chan	*qchan;
+	struct fsl_qdma_sg	*sg_block;
+	struct virt_dma_desc    vdesc;
+	struct list_head	list;
+	u32			sg_block_src;
+	u32			sg_block_dst;
+};
+
+/* Driver instance state; 'feature' selects big-endian register access. */
+struct fsl_qdma_engine {
+	struct dma_device	dma_dev;
+	void __iomem		*ctrl_base;
+	void __iomem            *status_base;
+	void __iomem		*block_base;
+	u32			n_chans;
+	u32			n_queues;
+	struct mutex            fsl_qdma_mutex;
+	int			error_irq;
+	int			queue_irq;
+	bool			feature;
+	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	*status;
+	struct fsl_qdma_chan	chans[];
+
+};
+
+/* Extract the low 40 bits (the 40-bit bus address) from a descriptor. */
+static inline u64
+qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
+{
+	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
+}
+
+/* Store a 40-bit address into the addr_lo/addr_hi pair. */
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+	ccdf->addr_hi = upper_32_bits(addr);
+	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
+}
+
+/* Queue index carried in the cfg8b_w1 byte of a status entry. */
+static inline u64
+qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+{
+	return ccdf->cfg8b_w1 & U8_MAX;
+}
+
+/* Offset field, bits 28:20 of cfg. */
+static inline int
+qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
+}
+
+/*
+ * Set the compound-format flag; @offset is written unshifted —
+ * presumably already positioned by the caller (see fill_memcpy).
+ */
+static inline void
+qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
+{
+	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
+}
+
+/* Status field, bits 28:20 of the status word. */
+static inline int
+qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+{
+	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
+}
+
+/* Request a status-queue notification (SER) for this descriptor. */
+static inline void
+qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+{
+	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
+}
+
+/* Compound S/G entry length (low 30 bits of cfg). */
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
+}
+
+/* Length plus the FIN (final entry) flag. */
+static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+}
+
+/* Length plus the EXT (extension/table) flag. */
+static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
+{
+	csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
+}
+
+/* Register accessors; FSL_DMA_IN/OUT honor the big-endian feature flag. */
+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
+{
+	return FSL_DMA_IN(qdma, addr, 32);
+}
+
+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
+						void __iomem *addr)
+{
+	FSL_DMA_OUT(qdma, addr, val, 32);
+}
+
+/* container_of helpers for the virt-dma embedding. */
+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
+}
+
+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct fsl_qdma_comp, vdesc);
+}
+
+/*
+ * dmaengine device_free_chan_resources callback: detach all of the
+ * channel's descriptors under the vchan lock, then release them
+ * outside it.
+ */
+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *qchan = to_fsl_qdma_chan(chan);
+	unsigned long irqflags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&qchan->vchan.lock, irqflags);
+	vchan_get_all_descriptors(&qchan->vchan, &head);
+	spin_unlock_irqrestore(&qchan->vchan.lock, irqflags);
+
+	vchan_dma_desc_free_list(&qchan->vchan, &head);
+}
+
+/*
+ * Fill a 96-byte descriptor buffer for a plain memcpy. Layout, in
+ * 16-byte slots of the buffer:
+ *   0: CCDF head            1: compound frame list
+ *   2: source S/G entry     3: destination S/G entry
+ *   4: source descriptor    5: destination descriptor
+ */
+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+					dma_addr_t dst, dma_addr_t src, u32 len)
+{
+	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
+	struct fsl_qdma_sdf *sdf;
+	struct fsl_qdma_ddf *ddf;
+
+	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
+	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
+	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
+	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
+	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
+
+	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
+	/* Head Command Descriptor(Frame Descriptor) */
+	/* The head points 16 bytes in, at the compound frame list. */
+	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
+	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
+	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
+
+	/* Status notification is enqueued to status queue. */
+	/* Compound Command Descriptor(Frame List Table) */
+	/* Points at slots 4-5 (source/destination descriptors, 32 bytes). */
+	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
+	/* It must be 32 as Compound S/G Descriptor */
+	qdma_csgf_set_len(csgf_desc, 32);
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
+	/* Descriptor Buffer */
+	sdf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd = cpu_to_le32(
+			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	ddf->cmd |= cpu_to_le32(
+			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
+}
+
+/*
+ * Pre-allocate one command descriptor per ring slot and park them on
+ * the queue's free list, so the prep path rarely has to allocate.
+ * Returns 0 or -ENOMEM; descriptors already added to the free list
+ * stay there and are reclaimed by fsl_qdma_remove().
+ */
+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
+{
+	struct fsl_qdma_comp *comp_temp;
+	int i;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return -ENOMEM;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			/* Don't leak the tracking struct itself. */
+			kfree(comp_temp);
+			return -ENOMEM;
+		}
+		list_add_tail(&comp_temp->list, &queue->comp_free);
+	}
+
+	return 0;
+}
+
+/*
+ * Request a command descriptor for enqueue: reuse one from the queue's
+ * free list, or allocate a fresh one, then attach the scatter-gather
+ * extension blocks needed for @dst_nents/@src_nents entries.
+ *
+ * Returns NULL on allocation failure with everything rolled back.
+ * Fixes vs. v4: the per-block dma_pool_alloc() result is checked
+ * (it was passed straight to memset), and comp_temp is no longer
+ * leaked on the sg_block failure path.
+ */
+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
+					struct fsl_qdma_chan *fsl_chan,
+					unsigned int dst_nents,
+					unsigned int src_nents)
+{
+	struct fsl_qdma_comp *comp_temp;
+	struct fsl_qdma_sg *sg_block;
+	struct fsl_qdma_queue *queue = fsl_chan->queue;
+	unsigned long flags;
+	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
+
+	spin_lock_irqsave(&queue->queue_lock, flags);
+	if (list_empty(&queue->comp_free)) {
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
+		if (!comp_temp)
+			return NULL;
+		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
+						      GFP_KERNEL,
+						      &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			kfree(comp_temp);
+			return NULL;
+		}
+	} else {
+		comp_temp = list_first_entry(&queue->comp_free,
+					     struct fsl_qdma_comp,
+					     list);
+		list_del(&comp_temp->list);
+		spin_unlock_irqrestore(&queue->queue_lock, flags);
+	}
+
+	/*
+	 * Each S/G block holds FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1 entries;
+	 * one slot is reserved for chaining to the next block.
+	 */
+	if (dst_nents != 0)
+		dst_sg_entry_block = dst_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		dst_sg_entry_block = 0;
+
+	if (src_nents != 0)
+		src_sg_entry_block = src_nents /
+					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
+	else
+		src_sg_entry_block = 0;
+
+	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
+	if (sg_entry_total) {
+		sg_block = kcalloc(sg_entry_total, sizeof(*sg_block),
+				   GFP_KERNEL);
+		if (!sg_block)
+			goto err_free_comp;
+		comp_temp->sg_block = sg_block;
+		for (i = 0; i < sg_entry_total; i++) {
+			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
+							GFP_KERNEL,
+							&sg_block->bus_addr);
+			if (!sg_block->virt_addr)
+				goto err_free_sg;
+			memset(sg_block->virt_addr, 0,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
+			sg_block++;
+		}
+	}
+
+	comp_temp->sg_block_src = src_sg_entry_block;
+	comp_temp->sg_block_dst = dst_sg_entry_block;
+	comp_temp->qchan = fsl_chan;
+
+	return comp_temp;
+
+err_free_sg:
+	/* Roll back the S/G blocks allocated so far. */
+	while (sg_block-- > comp_temp->sg_block)
+		dma_pool_free(queue->sg_pool, sg_block->virt_addr,
+			      sg_block->bus_addr);
+	kfree(comp_temp->sg_block);
+err_free_comp:
+	/* Descriptors from either origin were kzalloc'd: kfree is safe. */
+	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
+		      comp_temp->bus_addr);
+	kfree(comp_temp);
+	return NULL;
+}
+
+/*
+ * Allocate the per-queue state: for each of @queue_num queues, a
+ * coherent ring of command descriptors ("queue-sizes" from firmware
+ * properties) plus the dma pools for descriptor and S/G buffers.
+ *
+ * Returns the queue array (devm-allocated) or NULL. Fixes vs. v4: the
+ * error path now unwinds *all* previously allocated rings and pools —
+ * it used to free only the current ring and leak everything else —
+ * and a failed dma_alloc_coherent() no longer leaks earlier queues.
+ */
+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
+					struct platform_device *pdev,
+					unsigned int queue_num)
+{
+	struct fsl_qdma_queue *queue_head, *queue_temp;
+	int ret, len, i, j;
+	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+
+	if (queue_num > FSL_QDMA_QUEUE_MAX)
+		queue_num = FSL_QDMA_QUEUE_MAX;
+	len = sizeof(*queue_head) * queue_num;
+	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!queue_head)
+		return NULL;
+
+	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
+					queue_size, queue_num);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
+		return NULL;
+	}
+
+	for (i = 0; i < queue_num; i++) {
+		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
+			goto err_unwind;
+		}
+		queue_temp = queue_head + i;
+
+		/* Circular ring of command descriptors for this queue. */
+		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						queue_size[i],
+						&queue_temp->bus_addr,
+						GFP_KERNEL);
+		if (!queue_temp->cq)
+			goto err_unwind;
+		queue_temp->n_cq = queue_size[i];
+		queue_temp->id = i;
+		queue_temp->virt_head = queue_temp->cq;
+		queue_temp->virt_tail = queue_temp->cq;
+
+		/* The dma pool for queue command buffers. */
+		queue_temp->comp_pool = dma_pool_create("comp_pool",
+						&pdev->dev,
+						FSL_QDMA_BASE_BUFFER_SIZE,
+						16, 0);
+		if (!queue_temp->comp_pool)
+			goto err_unwind;
+
+		/* The dma pool for scatter-gather extension blocks. */
+		queue_temp->sg_pool = dma_pool_create("sg_pool",
+					&pdev->dev,
+					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
+					64, 0);
+		if (!queue_temp->sg_pool)
+			goto err_unwind;
+
+		INIT_LIST_HEAD(&queue_temp->comp_used);
+		INIT_LIST_HEAD(&queue_temp->comp_free);
+		spin_lock_init(&queue_temp->queue_lock);
+	}
+
+	return queue_head;
+
+err_unwind:
+	/*
+	 * Free everything allocated so far, including the partially
+	 * initialized queue i. queue_head is zeroed, so unset pointers
+	 * are NULL and dma_pool_destroy(NULL) is a no-op.
+	 */
+	for (j = 0; j <= i; j++) {
+		queue_temp = queue_head + j;
+		dma_pool_destroy(queue_temp->sg_pool);
+		dma_pool_destroy(queue_temp->comp_pool);
+		if (queue_temp->cq)
+			dma_free_coherent(&pdev->dev,
+					sizeof(struct fsl_qdma_format) *
+					queue_temp->n_cq,
+					queue_temp->cq,
+					queue_temp->bus_addr);
+	}
+	return NULL;
+}
+
+/*
+ * Allocate the status (completion) queue ring, sized by the DT
+ * "status-sizes" property. Returns NULL on any error.
+ */
+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
+						struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_queue *status_head;
+	unsigned int status_size;
+	int ret;
+
+	ret = of_property_read_u32(np, "status-sizes", &status_size);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get status-sizes.\n");
+		return NULL;
+	}
+	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
+			|| status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+		dev_err(&pdev->dev, "Get wrong status_size.\n");
+		return NULL;
+	}
+	status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
+								GFP_KERNEL);
+	if (!status_head)
+		return NULL;
+
+	/*
+	 * Buffer for queue command
+	 * NOTE(review): this coherent ring is only released in
+	 * fsl_qdma_remove(); probe error paths after this call leak it.
+	 */
+	status_head->cq = dma_alloc_coherent(&pdev->dev,
+						sizeof(struct fsl_qdma_format) *
+						status_size,
+						&status_head->bus_addr,
+						GFP_KERNEL);
+	if (!status_head->cq)
+		return NULL;
+
+	status_head->n_cq = status_size;
+	status_head->virt_head = status_head->cq;
+	status_head->virt_tail = status_head->cq;
+	/* The status queue has no command-buffer pool of its own. */
+	status_head->comp_pool = NULL;
+
+	return status_head;
+}
+
+/*
+ * Quiesce the engine: set the dequeue-disable bit, stop every command
+ * queue, poll for the engine-busy flag to clear, then stop the status
+ * queue and acknowledge pending queue interrupts.
+ * Returns 0 on success, -EBUSY if the engine stays busy.
+ */
+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, count = 5;
+	u32 reg;
+
+	/* Disable the command queue and wait for idle state. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg |= FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+	for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
+
+	/* Bounded poll: a handful of 100 us delays before giving up. */
+	while (1) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
+		if (!(reg & FSL_QDMA_DSR_DB))
+			break;
+		if (count-- < 0)
+			return -EBUSY;
+		udelay(100);
+	}
+
+	/* Disable status queue. */
+	qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return 0;
+}
+
+/*
+ * Drain the status completion queue.
+ *
+ * For every status entry, locate the command queue it refers to, pop the
+ * completed fsl_qdma_comp from that queue's comp_used list and complete
+ * its virt-dma cookie.  A status entry repeating the previous
+ * (queue, address) pair is treated as a duplicate and only acknowledged.
+ * Returns 0 once the status queue is empty, -1 on an inconsistent entry.
+ *
+ * Changes vs v4: fixed the misleading indentation of the duplicate_handle
+ * block (braces were correct, indentation was not) and dropped the local
+ * csgf_src, which was assigned but never read.
+ */
+static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
+	struct fsl_qdma_queue *temp_queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_format *status_addr;
+	void __iomem *block = fsl_qdma->block_base;
+	u32 reg, i;
+	bool duplicate, duplicate_handle;
+
+	while (1) {
+		duplicate = 0;
+		duplicate_handle = 0;
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE)
+			return 0;
+		status_addr = fsl_status->virt_head;
+		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
+			qdma_ccdf_addr_get64(status_addr) == pre_addr)
+			duplicate = 1;
+		i = qdma_ccdf_get_queue(status_addr);
+		pre_queue = qdma_ccdf_get_queue(status_addr);
+		pre_addr = qdma_ccdf_addr_get64(status_addr);
+		temp_queue = fsl_queue + i;
+		spin_lock(&temp_queue->queue_lock);
+		if (list_empty(&temp_queue->comp_used)) {
+			if (duplicate)
+				duplicate_handle = 1;
+			else {
+				spin_unlock(&temp_queue->queue_lock);
+				return -1;
+			}
+		} else {
+			fsl_comp = list_first_entry(&temp_queue->comp_used,
+							struct fsl_qdma_comp,
+							list);
+			if (fsl_comp->bus_addr + 16 != pre_addr) {
+				if (duplicate)
+					duplicate_handle = 1;
+				else {
+					spin_unlock(&temp_queue->queue_lock);
+					return -1;
+				}
+			}
+		}
+
+		if (duplicate_handle) {
+			/* Only acknowledge the status entry and advance. */
+			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+			reg |= FSL_QDMA_BSQMR_DI;
+			qdma_desc_addr_set64(status_addr, 0x0);
+			fsl_status->virt_head++;
+			if (fsl_status->virt_head == fsl_status->cq
+						   + fsl_status->n_cq)
+				fsl_status->virt_head = fsl_status->cq;
+			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+			spin_unlock(&temp_queue->queue_lock);
+			continue;
+		}
+		list_del(&fsl_comp->list);
+
+		/* Acknowledge the status entry and advance the ring head. */
+		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
+		reg |= FSL_QDMA_BSQMR_DI;
+		qdma_desc_addr_set64(status_addr, 0x0);
+		fsl_status->virt_head++;
+		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
+			fsl_status->virt_head = fsl_status->cq;
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+		spin_unlock(&temp_queue->queue_lock);
+
+		spin_lock(&fsl_comp->qchan->vchan.lock);
+		vchan_cookie_complete(&fsl_comp->vdesc);
+		fsl_comp->qchan->status = DMA_COMPLETE;
+		spin_unlock(&fsl_comp->qchan->vchan.lock);
+	}
+	return 0;
+}
+
+/*
+ * Error interrupt: report any engine error and acknowledge it by
+ * writing back all detect bits (assumed write-1-to-clear — confirm
+ * against the qDMA block guide).
+ */
+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr;
+	void __iomem *status = fsl_qdma->status_base;
+
+	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
+
+	if (intr)
+		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
+
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Queue interrupt: on a status-queue transition, drain completions via
+ * fsl_qdma_queue_transfer_complete(); any other cause (or a failed
+ * drain) disables the engine and masks the queue interrupt.
+ */
+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev_id;
+	unsigned int intr, reg;
+	void __iomem *block = fsl_qdma->block_base;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+
+	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
+
+	/* intr is reused: 0 after a clean drain, nonzero (-1) on error. */
+	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
+		intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
+
+	if (intr != 0) {
+		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+		reg |= FSL_QDMA_DMR_DQD;
+		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
+		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
+	}
+
+	/* Acknowledge all queue interrupt-detect bits. */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up the "qdma-error" and "qdma-queue" interrupts and install the
+ * corresponding handlers (devm-managed).
+ * Returns 0 on success or a negative errno.
+ *
+ * Change vs v4: fixed the double-space "return  ret;" checkpatch nits.
+ */
+static int
+fsl_qdma_irq_init(struct platform_device *pdev,
+		  struct fsl_qdma_engine *fsl_qdma)
+{
+	int ret;
+
+	fsl_qdma->error_irq = platform_get_irq_byname(pdev,
+							"qdma-error");
+	if (fsl_qdma->error_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+		return fsl_qdma->error_irq;
+	}
+
+	fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
+	if (fsl_qdma->queue_irq < 0) {
+		dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
+		return fsl_qdma->queue_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
+			fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
+		return ret;
+	}
+	ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
+			fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Release the IRQs taken in fsl_qdma_irq_init(). When both names map to
+ * the same interrupt line, it must only be freed once.
+ *
+ * Change vs v4: the queue_irq free was duplicated in both branches of an
+ * if/else; collapsed to a single call with identical behaviour.
+ */
+static void fsl_qdma_irq_exit(
+		struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
+{
+	devm_free_irq(&pdev->dev, fsl_qdma->queue_irq, fsl_qdma);
+	if (fsl_qdma->queue_irq != fsl_qdma->error_irq)
+		devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+}
+
+/*
+ * Program the hardware: halt the engine, set up each command queue ring,
+ * apply the ERR010812 workaround, configure the status queue and its
+ * interrupts, then clear the global dequeue-disable bit to start.
+ * Returns 0 on success, or the error from fsl_qdma_halt().
+ */
+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+	struct fsl_qdma_queue *temp;
+	void __iomem *ctrl = fsl_qdma->ctrl_base;
+	void __iomem *status = fsl_qdma->status_base;
+	void __iomem *block = fsl_qdma->block_base;
+	int i, ret;
+	u32 reg;
+
+	/* Try to halt the qDMA engine first. */
+	ret = fsl_qdma_halt(fsl_qdma);
+	if (ret) {
+		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
+		return ret;
+	}
+
+	/*
+	 * Clear the command queue interrupt detect register for all queues.
+	 */
+	qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
+
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		temp = fsl_queue + i;
+		/*
+		 * Initialize Command Queue registers to point to the first
+		 * command descriptor in memory.
+		 * Dequeue Pointer Address Registers
+		 * Enqueue Pointer Address Registers
+		 */
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQDPA_SADDR(i));
+		qdma_writel(fsl_qdma, temp->bus_addr,
+				block + FSL_QDMA_BCQEPA_SADDR(i));
+
+		/* Initialize the queue mode. */
+		/* Threshold/size fields are encoded as log2(n_cq) - offset. */
+		reg = FSL_QDMA_BCQMR_EN;
+		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
+		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
+		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
+	}
+
+	/*
+	 * Workaround for erratum: ERR010812.
+	 * We must enable XOFF to avoid the enqueue rejection occurs.
+	 * Setting SQCCMR ENTER_WM to 0x20.
+	 */
+	qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
+			      block + FSL_QDMA_SQCCMR);
+	/*
+	 * Initialize status queue registers to point to the first
+	 * command descriptor in memory.
+	 * Dequeue Pointer Address Registers
+	 * Enqueue Pointer Address Registers
+	 */
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQEPAR);
+	qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
+					block + FSL_QDMA_SQDPAR);
+	/* Initialize status queue interrupt. */
+	qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
+			      block + FSL_QDMA_BCQIER(0));
+	/* NOTE(review): magic 0x8000 deserves a named macro. */
+	qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
+						   | 0x8000,
+			      block + FSL_QDMA_BSQICR);
+	qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
+			      block + FSL_QDMA_CQIER);
+	/* Initialize controller interrupt register. */
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
+	qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
+
+	/* Initialize the status queue mode. */
+	reg = FSL_QDMA_BSQMR_EN;
+	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
+	qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
+
+	/* Clear dequeue-disable: the engine starts running here. */
+	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
+	reg &= ~FSL_QDMA_DMR_DQD;
+	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_prep_dma_memcpy: build a single memcpy descriptor.
+ * Returns the prepared tx descriptor, or NULL if no completion
+ * descriptor could be obtained.
+ *
+ * Fix vs v4: fsl_qdma_request_enqueue_desc() returns NULL on allocation
+ * failure; the result was passed to fsl_qdma_comp_fill_memcpy()
+ * unchecked, causing a NULL dereference under memory pressure.
+ */
+static struct dma_async_tx_descriptor *
+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+		dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_comp *fsl_comp;
+
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
+	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
+}
+
+/*
+ * Move the next pending virt-dma descriptor into the hardware command
+ * ring and ring the enqueue doorbell. Called with the queue lock and the
+ * channel's vchan lock held (see fsl_qdma_issue_pending()).
+ */
+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+{
+	void __iomem *block = fsl_chan->qdma->block_base;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_comp *fsl_comp;
+	struct virt_dma_desc *vdesc;
+	u32 reg;
+
+	/* Bail out if the hardware queue is full or flow-controlled off. */
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
+	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
+		return;
+	vdesc = vchan_next_desc(&fsl_chan->vchan);
+	if (!vdesc)
+		return;
+	list_del(&vdesc->node);
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+
+	/* Copy one 16-byte command descriptor into the ring slot. */
+	memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+		fsl_queue->virt_head = fsl_queue->cq;
+
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
+	/*
+	 * NOTE(review): barrier() is a compiler barrier only; ordering the
+	 * descriptor write against the MMIO doorbell below may require
+	 * dma_wmb() — please confirm.
+	 */
+	barrier();
+	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	reg |= FSL_QDMA_BCQMR_EI;
+	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+	fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+/*
+ * dmaengine device_tx_status: no residue accounting is implemented, so
+ * the generic cookie bookkeeping is all that is reported.
+ *
+ * Change vs v4: removed the dead conditional (both branches returned the
+ * same dma_cookie_status() result) and the unused local fsl_chan.
+ */
+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * virt-dma desc_free callback: return all scatter/gather blocks of the
+ * descriptor to the sg pool and recycle the completion descriptor onto
+ * the queue's free list for reuse.
+ */
+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_queue *fsl_queue;
+	struct fsl_qdma_sg *sg_block;
+	unsigned long flags;
+	unsigned int i;
+
+	fsl_comp = to_fsl_qdma_comp(vdesc);
+	fsl_queue = fsl_comp->qchan->queue;
+
+	if (fsl_comp->sg_block) {
+		for (i = 0; i < fsl_comp->sg_block_src +
+				fsl_comp->sg_block_dst; i++) {
+			sg_block = fsl_comp->sg_block + i;
+			dma_pool_free(fsl_queue->sg_pool,
+				      sg_block->virt_addr,
+				      sg_block->bus_addr);
+		}
+		kfree(fsl_comp->sg_block);
+	}
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+/*
+ * dmaengine issue_pending: push cookies submitted on the virtual channel
+ * into the hardware queue. Lock order: queue_lock, then vchan.lock.
+ */
+static void fsl_qdma_issue_pending(struct dma_chan *chan)
+{
+	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
+	spin_lock(&fsl_chan->vchan.lock);
+	if (vchan_issue_pending(&fsl_chan->vchan))
+		fsl_qdma_enqueue_desc(fsl_chan);
+	spin_unlock(&fsl_chan->vchan.lock);
+	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
+}
+
+/*
+ * Probe: parse DT properties, allocate the command/status rings, map the
+ * controller/status/block register regions, hook up IRQs, register the
+ * dmaengine device and finally program the hardware.
+ */
+static int fsl_qdma_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma;
+	struct fsl_qdma_chan *fsl_chan;
+	struct resource *res;
+	unsigned int len, chans, queues;
+	int ret, i;
+
+	ret = of_property_read_u32(np, "dma-channels", &chans);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get dma-channels.\n");
+		return ret;
+	}
+
+	/* Engine struct and per-channel array in one devm allocation. */
+	len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
+	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!fsl_qdma)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "queues", &queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get queues.\n");
+		return ret;
+	}
+
+	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
+	if (!fsl_qdma->queue)
+		return -ENOMEM;
+
+	/*
+	 * NOTE(review): from here on, every error return leaks the
+	 * coherent rings allocated above — they are only freed in
+	 * fsl_qdma_remove().
+	 */
+	fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
+	if (!fsl_qdma->status)
+		return -ENOMEM;
+
+	fsl_qdma->n_chans = chans;
+	fsl_qdma->n_queues = queues;
+	mutex_init(&fsl_qdma->fsl_qdma_mutex);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->ctrl_base))
+		return PTR_ERR(fsl_qdma->ctrl_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->status_base))
+		return PTR_ERR(fsl_qdma->status_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_qdma->block_base))
+		return PTR_ERR(fsl_qdma->block_base);
+
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
+	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
+	for (i = 0; i < fsl_qdma->n_chans; i++) {
+		/* NOTE(review): shadows the (unused) outer fsl_chan. */
+		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+		fsl_chan->qdma = fsl_qdma;
+		/* Channels are spread round-robin over the queues. */
+		fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
+		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
+		INIT_LIST_HEAD(&fsl_chan->qcomp);
+		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
+	}
+	for (i = 0; i < fsl_qdma->n_queues; i++)
+		fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+
+	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
+
+	fsl_qdma->dma_dev.dev = &pdev->dev;
+	fsl_qdma->dma_dev.device_free_chan_resources
+		= fsl_qdma_free_chan_resources;
+	fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
+	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
+	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
+
+	/* NOTE(review): dma_set_mask() can fail; check its return value. */
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	platform_set_drvdata(pdev, fsl_qdma);
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+		return ret;
+	}
+
+	ret = fsl_qdma_reg_init(fsl_qdma);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+		return ret;
+	}
+
+
+	return 0;
+}
+
+/* Detach every channel from the dma_device and kill its tasklet. */
+static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+{
+	struct fsl_qdma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan,
+				&dmadev->channels, vchan.chan.device_node) {
+		list_del(&chan->vchan.chan.device_node);
+		tasklet_kill(&chan->vchan.task);
+	}
+}
+
+/*
+ * Remove: tear down IRQs, channels and the dmaengine registration, then
+ * free every completion descriptor, ring and pool.
+ */
+static int fsl_qdma_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *status = fsl_qdma->status;
+	struct fsl_qdma_comp *comp_temp, *_comp_temp;
+	int i;
+
+	fsl_qdma_irq_exit(pdev, fsl_qdma);
+	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
+	/*
+	 * NOTE(review): probe() never calls of_dma_controller_register(),
+	 * so this of_dma_controller_free() looks unbalanced.
+	 */
+	of_dma_controller_free(np);
+	dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+	/* Free descriptor areas */
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		queue_temp = fsl_qdma->queue + i;
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_used,	list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		list_for_each_entry_safe(comp_temp, _comp_temp,
+					&queue_temp->comp_free, list) {
+			dma_pool_free(queue_temp->comp_pool,
+					comp_temp->virt_addr,
+					comp_temp->bus_addr);
+			list_del(&comp_temp->list);
+			kfree(comp_temp);
+		}
+		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+					queue_temp->n_cq, queue_temp->cq,
+					queue_temp->bus_addr);
+		dma_pool_destroy(queue_temp->comp_pool);
+		/*
+		 * NOTE(review): sg_pool is never destroyed, and sg_block
+		 * arrays of listed descriptors are not released — verify
+		 * for leaks on module unload.
+		 */
+	}
+
+	dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+				status->n_cq, status->cq, status->bus_addr);
+	return 0;
+}
+
+/* Devicetree match table for OF-based probing. */
+static const struct of_device_id fsl_qdma_dt_ids[] = {
+	{ .compatible = "fsl,ls1021a-qdma", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
+
+static struct platform_driver fsl_qdma_driver = {
+	.driver		= {
+		.name	= "fsl-qdma",
+		.of_match_table = fsl_qdma_dt_ids,
+	},
+	.probe          = fsl_qdma_probe,
+	.remove		= fsl_qdma_remove,
+};
+
+module_platform_driver(fsl_qdma_driver);
+
+MODULE_ALIAS("platform:fsl-qdma");
+MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscape qDMA controller bindings
  2018-05-14 12:03 ` [v4 1/6] " Wen He
@ 2018-05-14 12:03 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

Document the devicetree bindings for NXP Layerscape qDMA controller
which could be found on NXP QorIQ Layerscape SoCs.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- Rewrite the bindings document that follows generic DMA bindings file

change in v3:
	- no change

change in v2:
	- Remove indentation
	- Add "Should be" before 'fsl,ls1021a-qdma'
	- Replace 'channels' by 'dma-channels'
	- Replace 'qdma@8390000' by 'dma-controller@8390000'

 Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41 ++++++++++++++++++++
 1 files changed, 41 insertions(+), 0 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt

diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
new file mode 100644
index 0000000..368c4e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
@@ -0,0 +1,41 @@
+NXP Layerscape SoC qDMA Controller
+==================================
+
+This device follows the generic DMA bindings defined in dma/dma.txt.
+
+Required properties:
+
+- compatible:		Must be one of
+			 "fsl,ls1021a-qdma": for LS1021A Board
+			 "fsl,ls1043a-qdma": for ls1043A Board
+			 "fsl,ls1046a-qdma": for ls1046A Board
+- reg:			Should contain the register's base address and length.
+- interrupts:		Should contain a reference to the interrupt used by this
+			device.
+- interrupt-names:	Should contain interrupt names:
+			 "qdma-error": the error interrupt
+			 "qdma-queue": the queue interrupt
+- queues:		Should contain number of queues supported.
+
+Optional properties:
+
+- dma-channels:		Number of DMA channels supported by the controller.
+- big-endian:		If present registers and hardware scatter/gather descriptors
+			of the qDMA are implemented in big endian mode, otherwise in little
+			endian mode.
+
+Examples:
+
+	qdma: dma-controller@8390000 {
+		compatible = "fsl,ls1021a-qdma";
+		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
+		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
+		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "qdma-error", "qdma-queue";
+		dma-channels = <8>;
+		queues = <2>;
+		big-endian;
+	};
+
+DMA clients must use the format described in dma/dma.txt file.

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscape qDMA controller bindings
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

Document the devicetree bindings for NXP Layerscape qDMA controller
which could be found on NXP QorIQ Layerscape SoCs.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- Rewrite the bindings document that follows generic DMA bindings file

change in v3:
	- no change

change in v2:
	- Remove indentation
	- Add "Should be" before 'fsl,ls1021a-qdma'
	- Replace 'channels' by 'dma-channels'
	- Replace 'qdma@8390000' by 'dma-controller@8390000'

 Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41 ++++++++++++++++++++
 1 files changed, 41 insertions(+), 0 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt

diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
new file mode 100644
index 0000000..368c4e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
@@ -0,0 +1,41 @@
+NXP Layerscape SoC qDMA Controller
+==================================
+
+This device follows the generic DMA bindings defined in dma/dma.txt.
+
+Required properties:
+
+- compatible:		Must be one of
+			 "fsl,ls1021a-qdma": for LS1021A Board
+			 "fsl,ls1043a-qdma": for ls1043A Board
+			 "fsl,ls1046a-qdma": for ls1046A Board
+- reg:			Should contain the register's base address and length.
+- interrupts:		Should contain a reference to the interrupt used by this
+			device.
+- interrupt-names:	Should contain interrupt names:
+			 "qdma-error": the error interrupt
+			 "qdma-queue": the queue interrupt
+- queues:		Should contain number of queues supported.
+
+Optional properties:
+
+- dma-channels:		Number of DMA channels supported by the controller.
+- big-endian:		If present registers and hardware scatter/gather descriptors
+			of the qDMA are implemented in big endian mode, otherwise in little
+			endian mode.
+
+Examples:
+
+	qdma: dma-controller@8390000 {
+		compatible = "fsl,ls1021a-qdma";
+		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
+		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
+		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "qdma-error", "qdma-queue";
+		dma-channels = <8>;
+		queues = <2>;
+		big-endian;
+	};
+
+DMA clients must use the format described in dma/dma.txt file.
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,4/6] arm64: dts: ls1043a: add qdma device tree nodes
  2018-05-14 12:03 ` [v4 1/6] " Wen He
@ 2018-05-14 12:03 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1043A devices.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

 arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 1109f22..1425765 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -673,6 +673,21 @@
 			dma-coherent;
 		};
 
+		qdma: dma-controller@8380000 {
+			compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
+			reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8390000 0x0 0x10000>, /* Status regs */
+				<0x0 0x83a0000 0x0 0x40000>; /* Block regs */
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		msi1: msi-controller1@1571000 {
 			compatible = "fsl,ls1043a-msi";
 			reg = <0x0 0x1571000 0x0 0x8>;

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 4/6] arm64: dts: ls1043a: add qdma device tree nodes
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1043A devices.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

 arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 1109f22..1425765 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -673,6 +673,21 @@
 			dma-coherent;
 		};
 
+		qdma: dma-controller@8380000 {
+			compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
+			reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8390000 0x0 0x10000>, /* Status regs */
+				<0x0 0x83a0000 0x0 0x40000>; /* Block regs */
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		msi1: msi-controller1@1571000 {
 			compatible = "fsl,ls1043a-msi";
 			reg = <0x0 0x1571000 0x0 0x8>;
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,5/6] arm64: dts: ls1046a: add qdma device tree nodes
  2018-05-14 12:03 ` [v4 1/6] " Wen He
@ 2018-05-14 12:03 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1046A devices.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 136ebfa..6a925be 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -597,6 +597,21 @@
 				 <&clockgen 4 1>;
 		};
 
+		qdma: dma-controller@8380000 {
+			compatible = "fsl,ls1021a-qdma", "fsl,ls1046a-qdma";
+			reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8390000 0x0 0x10000>, /* Status regs */
+				<0x0 0x83a0000 0x0 0x40000>; /* Block regs */
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		usb0: usb@2f00000 {
 			compatible = "snps,dwc3";
 			reg = <0x0 0x2f00000 0x0 0x10000>;

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 5/6] arm64: dts: ls1046a: add qdma device tree nodes
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1046A devices.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no

 arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 136ebfa..6a925be 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -597,6 +597,21 @@
 				 <&clockgen 4 1>;
 		};
 
+		qdma: dma-controller@8380000 {
+			compatible = "fsl,ls1021a-qdma", "fsl,ls1046a-qdma";
+			reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8390000 0x0 0x10000>, /* Status regs */
+				<0x0 0x83a0000 0x0 0x40000>; /* Block regs */
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		usb0: usb@2f00000 {
 			compatible = "snps,dwc3";
 			reg = <0x0 0x2f00000 0x0 0x10000>;
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,6/6] arm: dts: ls1021a: add qdma device tree nodes
  2018-05-14 12:03 ` [v4 1/6] " Wen He
@ 2018-05-14 12:03 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1021A devices.

Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no 

 arch/arm/boot/dts/ls1021a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index c55d479..8125cee 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -571,6 +571,21 @@
 				 <&clockgen 4 1>;
 		};
 
+		qdma: dma-controller@8390000 {
+			compatible = "fsl,ls1021a-qdma";
+			reg = <0x0 0x8398000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8399000 0x0 0x1000>, /* Status regs */
+				<0x0 0x839a000 0x0 0x2000>; /* Block regs */
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		dcu: dcu@2ce0000 {
 			compatible = "fsl,ls1021a-dcu";
 			reg = <0x0 0x2ce0000 0x0 0x10000>;

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4 6/6] arm: dts: ls1021a: add qdma device tree nodes
@ 2018-05-14 12:03 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-14 12:03 UTC (permalink / raw)
  To: vinod.koul, dmaengine
  Cc: robh+dt, devicetree, leoyang.li, jiafei.pan, jiaheng.fan, Wen He

add the qDMA device tree nodes for LS1021A devices.
Signed-off-by: Wen He <wen.he_1@nxp.com>
---
change in v4:
	- no 

 arch/arm/boot/dts/ls1021a.dtsi |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index c55d479..8125cee 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -571,6 +571,21 @@
 				 <&clockgen 4 1>;
 		};
 
+		qdma: dma-controller@8390000 {
+			compatible = "fsl,ls1021a-qdma";
+			reg = <0x0 0x8398000 0x0 0x1000>, /* Controller regs */
+				<0x0 0x8399000 0x0 0x1000>, /* Status regs */
+				<0x0 0x839a000 0x0 0x2000>; /* Block regs */
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "qdma-error", "qdma-queue";
+			dma-channels = <8>;
+			queues = <2>;
+			status-sizes = <64>;
+			queue-sizes = <64 64>;
+			big-endian;
+		};
+
 		dcu: dcu@2ce0000 {
 			compatible = "fsl,ls1021a-dcu";
 			reg = <0x0 0x2ce0000 0x0 0x10000>;
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-14 12:03 ` [v4 2/6] " Wen He
@ 2018-05-17  6:04 ` Vinod
  -1 siblings, 0 replies; 32+ messages in thread
From: Vinod Koul @ 2018-05-17  6:04 UTC (permalink / raw)
  To: Wen He
  Cc: vinod.koul, dmaengine, robh+dt, devicetree, leoyang.li,
	jiafei.pan, jiaheng.fan

On 14-05-18, 20:03, Wen He wrote:

> +
> +/* Registers for bit and genmask */
> +#define FSL_QDMA_CQIDR_SQT		0x8000

BIT() ?

> +#define QDMA_CCDF_MASK			GENMASK(28, 20)
> +#define QDMA_CCDF_FOTMAT		BIT(29)
> +#define QDMA_CCDF_SER			BIT(30)
> +#define QDMA_SG_FIN			BIT(30)
> +#define QDMA_SG_EXT			BIT(31)
> +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
> +
> +#define QDMA_CCDF_STATUS		20
> +#define QDMA_CCDF_OFFSET		20
> +#define FSL_QDMA_BCQIER_CQTIE		0x8000
> +#define FSL_QDMA_BCQIER_CQPEIE		0x800000
> +#define FSL_QDMA_BSQICR_ICEN		0x80000000

here and few other places as well

> +
> +u64 pre_addr, pre_queue;

why do we have a global?

> +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> +					dma_addr_t dst, dma_addr_t src, u32 len)
> +{
> +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> +	struct fsl_qdma_sdf *sdf;
> +	struct fsl_qdma_ddf *ddf;
> +
> +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;

Cast are not required to/away from void

> +	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
> +	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
> +	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
> +	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
> +	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
> +
> +	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
> +	/* Head Command Descriptor(Frame Descriptor) */
> +	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
> +	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
> +	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
> +
> +	/* Status notification is enqueued to status queue. */
> +	/* Compound Command Descriptor(Frame List Table) */
> +	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
> +	/* It must be 32 as Compound S/G Descriptor */
> +	qdma_csgf_set_len(csgf_desc, 32);
> +	qdma_desc_addr_set64(csgf_src, src);
> +	qdma_csgf_set_len(csgf_src, len);
> +	qdma_desc_addr_set64(csgf_dest, dst);
> +	qdma_csgf_set_len(csgf_dest, len);
> +	/* This entry is the last entry. */
> +	qdma_csgf_set_f(csgf_dest, len);
> +	/* Descriptor Buffer */
> +	sdf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd |= cpu_to_le32(
> +			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
> +}
> +
> +/*
> + * Pre-request full command descriptor for enqueue.
> + */
> +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	int i;
> +
> +	for (i = 0; i < queue->n_cq; i++) {
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return -ENOMEM;

where is the previous allocations freed? Return of this function is not even
checked??

> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_KERNEL,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr)
> +			return -ENOMEM;

and here too
> +		list_add_tail(&comp_temp->list, &queue->comp_free);
> +	}
> +
> +	return 0;
> +}
> +
> +/*
> + * Request a command descriptor for enqueue.
> + */
> +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> +					struct fsl_qdma_chan *fsl_chan,
> +					unsigned int dst_nents,
> +					unsigned int src_nents)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	struct fsl_qdma_sg *sg_block;
> +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> +	unsigned long flags;
> +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
> +
> +	spin_lock_irqsave(&queue->queue_lock, flags);
> +	if (list_empty(&queue->comp_free)) {
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return NULL;
> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_KERNEL,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr) {
> +			kfree(comp_temp);
> +			return NULL;
> +		}
> +
> +	} else {
> +		comp_temp = list_first_entry(&queue->comp_free,
> +					     struct fsl_qdma_comp,
> +					     list);
> +		list_del(&comp_temp->list);
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +	}
> +
> +	if (dst_nents != 0)
> +		dst_sg_entry_block = dst_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;

DIV_ROUND_UP()?

> +	else
> +		dst_sg_entry_block = 0;
> +
> +	if (src_nents != 0)
> +		src_sg_entry_block = src_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> +	else
> +		src_sg_entry_block = 0;
> +
> +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> +	if (sg_entry_total) {
> +		sg_block = kzalloc(sizeof(*sg_block) *
> +					      sg_entry_total,
> +					      GFP_KERNEL);

kcalloc?

> +		if (!sg_block) {
> +			dma_pool_free(queue->comp_pool,
> +					comp_temp->virt_addr,
> +					comp_temp->bus_addr);
> +			return NULL;
> +		}
> +		comp_temp->sg_block = sg_block;
> +		for (i = 0; i < sg_entry_total; i++) {
> +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
> +							GFP_KERNEL,
> +							&sg_block->bus_addr);

no check if this succeeded?

> +			memset(sg_block->virt_addr, 0,
> +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);

why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you allocated?

> +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> +					struct platform_device *pdev,
> +					unsigned int queue_num)
> +{
> +	struct fsl_qdma_queue *queue_head, *queue_temp;
> +	int ret, len, i;
> +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> +
> +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> +		queue_num = FSL_QDMA_QUEUE_MAX;
> +	len = sizeof(*queue_head) * queue_num;
> +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +	if (!queue_head)
> +		return NULL;
> +
> +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
> +					queue_size, queue_num);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < queue_num; i++) {
> +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> +			return NULL;
> +		}
> +		queue_temp = queue_head + i;
> +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_format) *
> +						queue_size[i],
> +						&queue_temp->bus_addr,
> +						GFP_KERNEL);
> +		if (!queue_temp->cq)
> +			return NULL;
> +		queue_temp->n_cq = queue_size[i];
> +		queue_temp->id = i;
> +		queue_temp->virt_head = queue_temp->cq;
> +		queue_temp->virt_tail = queue_temp->cq;
> +		/*
> +		 * The dma pool for queue command buffer
> +		 */
> +		queue_temp->comp_pool = dma_pool_create("comp_pool",
> +						&pdev->dev,
> +						FSL_QDMA_BASE_BUFFER_SIZE,
> +						16, 0);
> +		if (!queue_temp->comp_pool)
> +			goto err_free_comp;
> +
> +		/*
> +		 * The dma pool for queue command buffer

same comment as prev?

> +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> +	struct fsl_qdma_queue *temp_queue;
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_format *status_addr;
> +	struct fsl_qdma_format *csgf_src;
> +	void __iomem *block = fsl_qdma->block_base;
> +	u32 reg, i;
> +	bool duplicate, duplicate_handle;
> +
> +	while (1) {
> +		duplicate = 0;
> +		duplicate_handle = 0;
> +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> +		if (reg & FSL_QDMA_BSQSR_QE)
> +			return 0;
> +		status_addr = fsl_status->virt_head;
> +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> +			duplicate = 1;
> +		i = qdma_ccdf_get_queue(status_addr);
> +		pre_queue = qdma_ccdf_get_queue(status_addr);
> +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> +		temp_queue = fsl_queue + i;
> +		spin_lock(&temp_queue->queue_lock);
> +		if (list_empty(&temp_queue->comp_used)) {
> +			if (duplicate)
> +				duplicate_handle = 1;
> +			else {
> +				spin_unlock(&temp_queue->queue_lock);
> +				return -1;

-1? really. You are in while(1) wouldn't break make sense here?

> +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> +		dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

why do you need this here, its unused

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-17  6:04 ` Vinod
  0 siblings, 0 replies; 32+ messages in thread
From: Vinod @ 2018-05-17  6:04 UTC (permalink / raw)
  To: Wen He
  Cc: vinod.koul, dmaengine, robh+dt, devicetree, leoyang.li,
	jiafei.pan, jiaheng.fan

On 14-05-18, 20:03, Wen He wrote:

> +
> +/* Registers for bit and genmask */
> +#define FSL_QDMA_CQIDR_SQT		0x8000

BIT() ?

> +#define QDMA_CCDF_MASK			GENMASK(28, 20)
> +#define QDMA_CCDF_FOTMAT		BIT(29)
> +#define QDMA_CCDF_SER			BIT(30)
> +#define QDMA_SG_FIN			BIT(30)
> +#define QDMA_SG_EXT			BIT(31)
> +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
> +
> +#define QDMA_CCDF_STATUS		20
> +#define QDMA_CCDF_OFFSET		20
> +#define FSL_QDMA_BCQIER_CQTIE		0x8000
> +#define FSL_QDMA_BCQIER_CQPEIE		0x800000
> +#define FSL_QDMA_BSQICR_ICEN		0x80000000

here and few other places as well

> +
> +u64 pre_addr, pre_queue;

why do we have a global?

> +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> +					dma_addr_t dst, dma_addr_t src, u32 len)
> +{
> +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> +	struct fsl_qdma_sdf *sdf;
> +	struct fsl_qdma_ddf *ddf;
> +
> +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;

Cast are not required to/away from void

> +	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
> +	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
> +	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
> +	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
> +	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
> +
> +	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
> +	/* Head Command Descriptor(Frame Descriptor) */
> +	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
> +	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
> +	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
> +
> +	/* Status notification is enqueued to status queue. */
> +	/* Compound Command Descriptor(Frame List Table) */
> +	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
> +	/* It must be 32 as Compound S/G Descriptor */
> +	qdma_csgf_set_len(csgf_desc, 32);
> +	qdma_desc_addr_set64(csgf_src, src);
> +	qdma_csgf_set_len(csgf_src, len);
> +	qdma_desc_addr_set64(csgf_dest, dst);
> +	qdma_csgf_set_len(csgf_dest, len);
> +	/* This entry is the last entry. */
> +	qdma_csgf_set_f(csgf_dest, len);
> +	/* Descriptor Buffer */
> +	sdf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd = cpu_to_le32(
> +			FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
> +	ddf->cmd |= cpu_to_le32(
> +			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
> +}
> +
> +/*
> + * Pre-request full command descriptor for enqueue.
> + */
> +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	int i;
> +
> +	for (i = 0; i < queue->n_cq; i++) {
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return -ENOMEM;

where is the previous allocations freed? Return of this function is not even
checked??

> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_KERNEL,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr)
> +			return -ENOMEM;

and here too
> +		list_add_tail(&comp_temp->list, &queue->comp_free);
> +	}
> +
> +	return 0;
> +}
> +
> +/*
> + * Request a command descriptor for enqueue.
> + */
> +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> +					struct fsl_qdma_chan *fsl_chan,
> +					unsigned int dst_nents,
> +					unsigned int src_nents)
> +{
> +	struct fsl_qdma_comp *comp_temp;
> +	struct fsl_qdma_sg *sg_block;
> +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> +	unsigned long flags;
> +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
> +
> +	spin_lock_irqsave(&queue->queue_lock, flags);
> +	if (list_empty(&queue->comp_free)) {
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> +		if (!comp_temp)
> +			return NULL;
> +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> +						      GFP_KERNEL,
> +						      &comp_temp->bus_addr);
> +		if (!comp_temp->virt_addr) {
> +			kfree(comp_temp);
> +			return NULL;
> +		}
> +
> +	} else {
> +		comp_temp = list_first_entry(&queue->comp_free,
> +					     struct fsl_qdma_comp,
> +					     list);
> +		list_del(&comp_temp->list);
> +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> +	}
> +
> +	if (dst_nents != 0)
> +		dst_sg_entry_block = dst_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;

DIV_ROUND_UP()?

> +	else
> +		dst_sg_entry_block = 0;
> +
> +	if (src_nents != 0)
> +		src_sg_entry_block = src_nents /
> +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> +	else
> +		src_sg_entry_block = 0;
> +
> +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> +	if (sg_entry_total) {
> +		sg_block = kzalloc(sizeof(*sg_block) *
> +					      sg_entry_total,
> +					      GFP_KERNEL);

kcalloc?

> +		if (!sg_block) {
> +			dma_pool_free(queue->comp_pool,
> +					comp_temp->virt_addr,
> +					comp_temp->bus_addr);
> +			return NULL;
> +		}
> +		comp_temp->sg_block = sg_block;
> +		for (i = 0; i < sg_entry_total; i++) {
> +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
> +							GFP_KERNEL,
> +							&sg_block->bus_addr);

no check if this succeeded?

> +			memset(sg_block->virt_addr, 0,
> +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);

why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you allocated?

> +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> +					struct platform_device *pdev,
> +					unsigned int queue_num)
> +{
> +	struct fsl_qdma_queue *queue_head, *queue_temp;
> +	int ret, len, i;
> +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> +
> +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> +		queue_num = FSL_QDMA_QUEUE_MAX;
> +	len = sizeof(*queue_head) * queue_num;
> +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> +	if (!queue_head)
> +		return NULL;
> +
> +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
> +					queue_size, queue_num);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < queue_num; i++) {
> +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> +			return NULL;
> +		}
> +		queue_temp = queue_head + i;
> +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
> +						sizeof(struct fsl_qdma_format) *
> +						queue_size[i],
> +						&queue_temp->bus_addr,
> +						GFP_KERNEL);
> +		if (!queue_temp->cq)
> +			return NULL;
> +		queue_temp->n_cq = queue_size[i];
> +		queue_temp->id = i;
> +		queue_temp->virt_head = queue_temp->cq;
> +		queue_temp->virt_tail = queue_temp->cq;
> +		/*
> +		 * The dma pool for queue command buffer
> +		 */
> +		queue_temp->comp_pool = dma_pool_create("comp_pool",
> +						&pdev->dev,
> +						FSL_QDMA_BASE_BUFFER_SIZE,
> +						16, 0);
> +		if (!queue_temp->comp_pool)
> +			goto err_free_comp;
> +
> +		/*
> +		 * The dma pool for queue command buffer

same comment as prev?

> +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
> +{
> +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> +	struct fsl_qdma_queue *temp_queue;
> +	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_format *status_addr;
> +	struct fsl_qdma_format *csgf_src;
> +	void __iomem *block = fsl_qdma->block_base;
> +	u32 reg, i;
> +	bool duplicate, duplicate_handle;
> +
> +	while (1) {
> +		duplicate = 0;
> +		duplicate_handle = 0;
> +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> +		if (reg & FSL_QDMA_BSQSR_QE)
> +			return 0;
> +		status_addr = fsl_status->virt_head;
> +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> +			duplicate = 1;
> +		i = qdma_ccdf_get_queue(status_addr);
> +		pre_queue = qdma_ccdf_get_queue(status_addr);
> +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> +		temp_queue = fsl_queue + i;
> +		spin_lock(&temp_queue->queue_lock);
> +		if (list_empty(&temp_queue->comp_used)) {
> +			if (duplicate)
> +				duplicate_handle = 1;
> +			else {
> +				spin_unlock(&temp_queue->queue_lock);
> +				return -1;

-1? really. You are in while(1) wouldn't break make sense here?

> +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> +		dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{
> +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

why do you need this here, its unused

-- 
~Vinod

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-17  6:04 ` [v4 2/6] " Vinod
@ 2018-05-17 11:27 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-17 11:27 UTC (permalink / raw)
  To: Vinod; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

> -----Original Message-----
> From: Vinod [mailto:vkoul@kernel.org]
> Sent: 2018年5月17日 14:05
> To: Wen He <wen.he_1@nxp.com>
> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 14-05-18, 20:03, Wen He wrote:
> 
> > +
> > +/* Registers for bit and genmask */
> > +#define FSL_QDMA_CQIDR_SQT		0x8000
> 
> BIT() ?

Sorry, maybe I should replace 0x8000 with BIT(15).

> 
> > +#define QDMA_CCDF_MASK			GENMASK(28, 20)
> > +#define QDMA_CCDF_FOTMAT		BIT(29)
> > +#define QDMA_CCDF_SER			BIT(30)
> > +#define QDMA_SG_FIN			BIT(30)
> > +#define QDMA_SG_EXT			BIT(31)
> > +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
> > +
> > +#define QDMA_CCDF_STATUS		20
> > +#define QDMA_CCDF_OFFSET		20
> > +#define FSL_QDMA_BCQIER_CQTIE		0x8000
> > +#define FSL_QDMA_BCQIER_CQPEIE		0x800000
> > +#define FSL_QDMA_BSQICR_ICEN		0x80000000
> 
> here and few other places as well
> 

Got it, these will be replaced with BIT()/GENMASK() definitions in the next version.

> > +
> > +u64 pre_addr, pre_queue;
> 
> why do we have a global?

Let me explain how the qDMA works.

First, the status notification for DMA jobs are reported back to the status queue.
Status information is carried within the command descriptor status/command field,
bits 120-127. The command descriptor dequeue pointer advances only after the
transaction has completed and the status information field has been updated.

Then, the command descriptor address field will point to the command descriptor in
its original format. It is the responsibility of the status queue consumer
to deallocate buffers as needed when the command descriptor address pointer is non-zero.

More details of the Status Queue can be found in QorIQ Layerscape Soc datasheet.

So, these variables are used to record the latest values of the command descriptor
queue and status fields.

If these variables were made local, their values would be reset to zero on every call, which is not what I want.

> 
> > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > +	struct fsl_qdma_sdf *sdf;
> > +	struct fsl_qdma_ddf *ddf;
> > +
> > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> 
> Cast are not required to/away from void
> 

Does this mean: remove the explicit cast?

> > +	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
> > +	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
> > +	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
> > +	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
> > +	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
> > +
> > +	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
> > +	/* Head Command Descriptor(Frame Descriptor) */
> > +	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
> > +	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
> > +	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
> > +
> > +	/* Status notification is enqueued to status queue. */
> > +	/* Compound Command Descriptor(Frame List Table) */
> > +	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
> > +	/* It must be 32 as Compound S/G Descriptor */
> > +	qdma_csgf_set_len(csgf_desc, 32);
> > +	qdma_desc_addr_set64(csgf_src, src);
> > +	qdma_csgf_set_len(csgf_src, len);
> > +	qdma_desc_addr_set64(csgf_dest, dst);
> > +	qdma_csgf_set_len(csgf_dest, len);
> > +	/* This entry is the last entry. */
> > +	qdma_csgf_set_f(csgf_dest, len);
> > +	/* Descriptor Buffer */
> > +	sdf->cmd = cpu_to_le32(
> > +			FSL_QDMA_CMD_RWTTYPE <<
> FSL_QDMA_CMD_RWTTYPE_OFFSET);
> > +	ddf->cmd = cpu_to_le32(
> > +			FSL_QDMA_CMD_RWTTYPE <<
> FSL_QDMA_CMD_RWTTYPE_OFFSET);
> > +	ddf->cmd |= cpu_to_le32(
> > +			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); }
> > +
> > +/*
> > + * Pre-request full command descriptor for enqueue.
> > + */
> > +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue
> > +*queue) {
> > +	struct fsl_qdma_comp *comp_temp;
> > +	int i;
> > +
> > +	for (i = 0; i < queue->n_cq; i++) {
> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > +		if (!comp_temp)
> > +			return -ENOMEM;
> 
> where is the previous allocations freed? Return of this function is not even
> checked??
> 

Sorry, maybe I forgot to free the previous allocations.

> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > +						      GFP_KERNEL,
> > +						      &comp_temp->bus_addr);
> > +		if (!comp_temp->virt_addr)
> > +			return -ENOMEM;
> 
> and here too

okay

> > +		list_add_tail(&comp_temp->list, &queue->comp_free);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +/*
> > + * Request a command descriptor for enqueue.
> > + */
> > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > +					struct fsl_qdma_chan *fsl_chan,
> > +					unsigned int dst_nents,
> > +					unsigned int src_nents)
> > +{
> > +	struct fsl_qdma_comp *comp_temp;
> > +	struct fsl_qdma_sg *sg_block;
> > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > +	unsigned long flags;
> > +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total,
> > +i;
> > +
> > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > +	if (list_empty(&queue->comp_free)) {
> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > +		if (!comp_temp)
> > +			return NULL;
> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > +						      GFP_KERNEL,
> > +						      &comp_temp->bus_addr);
> > +		if (!comp_temp->virt_addr) {
> > +			kfree(comp_temp);
> > +			return NULL;
> > +		}
> > +
> > +	} else {
> > +		comp_temp = list_first_entry(&queue->comp_free,
> > +					     struct fsl_qdma_comp,
> > +					     list);
> > +		list_del(&comp_temp->list);
> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > +	}
> > +
> > +	if (dst_nents != 0)
> > +		dst_sg_entry_block = dst_nents /
> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> 
> DIV_ROUND_UP()?
> 

The DIV_ROUND_UP() definition see below:

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))

But the expression here is 'n / (d - 1) + 1', which is not the same as DIV_ROUND_UP(n, d)?

> > +	else
> > +		dst_sg_entry_block = 0;
> > +
> > +	if (src_nents != 0)
> > +		src_sg_entry_block = src_nents /
> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > +	else
> > +		src_sg_entry_block = 0;
> > +
> > +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> > +	if (sg_entry_total) {
> > +		sg_block = kzalloc(sizeof(*sg_block) *
> > +					      sg_entry_total,
> > +					      GFP_KERNEL);
> 
> kcalloc?
> 

All right, I will replace it with kcalloc().

> > +		if (!sg_block) {
> > +			dma_pool_free(queue->comp_pool,
> > +					comp_temp->virt_addr,
> > +					comp_temp->bus_addr);
> > +			return NULL;
> > +		}
> > +		comp_temp->sg_block = sg_block;
> > +		for (i = 0; i < sg_entry_total; i++) {
> > +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
> > +							GFP_KERNEL,
> > +							&sg_block->bus_addr);
> 
> no check if this succeeded?
> 

Sorry, this will be fixed in the next version.

> > +			memset(sg_block->virt_addr, 0,
> > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> 
> why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> allocated?
> 

see line 497.
The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16.

> > +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> > +					struct platform_device *pdev,
> > +					unsigned int queue_num)
> > +{
> > +	struct fsl_qdma_queue *queue_head, *queue_temp;
> > +	int ret, len, i;
> > +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> > +
> > +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> > +		queue_num = FSL_QDMA_QUEUE_MAX;
> > +	len = sizeof(*queue_head) * queue_num;
> > +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> > +	if (!queue_head)
> > +		return NULL;
> > +
> > +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
> > +					queue_size, queue_num);
> > +	if (ret) {
> > +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> > +		return NULL;
> > +	}
> > +
> > +	for (i = 0; i < queue_num; i++) {
> > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> > +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> > +			return NULL;
> > +		}
> > +		queue_temp = queue_head + i;
> > +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
> > +						sizeof(struct fsl_qdma_format) *
> > +						queue_size[i],
> > +						&queue_temp->bus_addr,
> > +						GFP_KERNEL);
> > +		if (!queue_temp->cq)
> > +			return NULL;
> > +		queue_temp->n_cq = queue_size[i];
> > +		queue_temp->id = i;
> > +		queue_temp->virt_head = queue_temp->cq;
> > +		queue_temp->virt_tail = queue_temp->cq;
> > +		/*
> > +		 * The dma pool for queue command buffer
> > +		 */
> > +		queue_temp->comp_pool = dma_pool_create("comp_pool",
> > +						&pdev->dev,
> > +						FSL_QDMA_BASE_BUFFER_SIZE,
> > +						16, 0);
> > +		if (!queue_temp->comp_pool)
> > +			goto err_free_comp;
> > +
> > +		/*
> > +		 * The dma pool for queue command buffer
> 
> same comment as prev?
> 

Okay, the second comment should be removed.

> > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine
> > +*fsl_qdma) {
> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > +	struct fsl_qdma_queue *temp_queue;
> > +	struct fsl_qdma_comp *fsl_comp;
> > +	struct fsl_qdma_format *status_addr;
> > +	struct fsl_qdma_format *csgf_src;
> > +	void __iomem *block = fsl_qdma->block_base;
> > +	u32 reg, i;
> > +	bool duplicate, duplicate_handle;
> > +
> > +	while (1) {
> > +		duplicate = 0;
> > +		duplicate_handle = 0;
> > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > +		if (reg & FSL_QDMA_BSQSR_QE)
> > +			return 0;
> > +		status_addr = fsl_status->virt_head;
> > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > +			duplicate = 1;
> > +		i = qdma_ccdf_get_queue(status_addr);
> > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > +		temp_queue = fsl_queue + i;
> > +		spin_lock(&temp_queue->queue_lock);
> > +		if (list_empty(&temp_queue->comp_used)) {
> > +			if (duplicate)
> > +				duplicate_handle = 1;
> > +			else {
> > +				spin_unlock(&temp_queue->queue_lock);
> > +				return -1;
> 
> -1? really. You are in while(1) wouldn't break make sense here?
> 

Does this mean: use break instead of returning -1?

> > +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> > +		dma_cookie_t cookie, struct dma_tx_state *txstate) {
> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> 
> why do you need this here, its unused
> 

Okay, I will remove it.

> --
> ~Vinod
--
Best Regards,
Wen

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-17 11:27 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-17 11:27 UTC (permalink / raw)
  To: Vinod; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan



> -----Original Message-----
> From: Vinod [mailto:vkoul@kernel.org]
> Sent: 2018年5月17日 14:05
> To: Wen He <wen.he_1@nxp.com>
> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 14-05-18, 20:03, Wen He wrote:
> 
> > +
> > +/* Registers for bit and genmask */
> > +#define FSL_QDMA_CQIDR_SQT		0x8000
> 
> BIT() ?

Sorry, maybe I should replace 0x8000 with BIT(15).

> 
> > +#define QDMA_CCDF_MASK			GENMASK(28, 20)
> > +#define QDMA_CCDF_FOTMAT		BIT(29)
> > +#define QDMA_CCDF_SER			BIT(30)
> > +#define QDMA_SG_FIN			BIT(30)
> > +#define QDMA_SG_EXT			BIT(31)
> > +#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
> > +
> > +#define QDMA_CCDF_STATUS		20
> > +#define QDMA_CCDF_OFFSET		20
> > +#define FSL_QDMA_BCQIER_CQTIE		0x8000
> > +#define FSL_QDMA_BCQIER_CQPEIE		0x800000
> > +#define FSL_QDMA_BSQICR_ICEN		0x80000000
> 
> here and few other places as well
> 

Got it; the next version will replace these with BIT() definitions.

> > +
> > +u64 pre_addr, pre_queue;
> 
> why do we have a global?

Let us look at how qDMA works.

First, the status notification for DMA jobs are reported back to the status queue.
Status information is carried within the command descriptor status/command field,
bits 120-127. The command descriptor dequeue pointer advances only after the
transaction has completed and the status information field has been updated.

Then, the command descriptor address field will point to the command descriptor in
its original format. It is the responsibility of the status queue consumer
to deallocate buffers as needed when the command descriptor address pointer is non-zero.

More details of the Status Queue can be found in the QorIQ Layerscape SoC datasheet.

So, these variables are used to record the latest values of the command descriptor
queue and the status field.

If these variables were made local, their values would be reset to zero on every call, which is not what I want.

> 
> > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > +	struct fsl_qdma_sdf *sdf;
> > +	struct fsl_qdma_ddf *ddf;
> > +
> > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> 
> Cast are not required to/away from void
> 

Do you mean: remove the forced conversion (the explicit cast)?

> > +	csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
> > +	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
> > +	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
> > +	sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
> > +	ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
> > +
> > +	memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
> > +	/* Head Command Descriptor(Frame Descriptor) */
> > +	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
> > +	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
> > +	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
> > +
> > +	/* Status notification is enqueued to status queue. */
> > +	/* Compound Command Descriptor(Frame List Table) */
> > +	qdma_desc_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
> > +	/* It must be 32 as Compound S/G Descriptor */
> > +	qdma_csgf_set_len(csgf_desc, 32);
> > +	qdma_desc_addr_set64(csgf_src, src);
> > +	qdma_csgf_set_len(csgf_src, len);
> > +	qdma_desc_addr_set64(csgf_dest, dst);
> > +	qdma_csgf_set_len(csgf_dest, len);
> > +	/* This entry is the last entry. */
> > +	qdma_csgf_set_f(csgf_dest, len);
> > +	/* Descriptor Buffer */
> > +	sdf->cmd = cpu_to_le32(
> > +			FSL_QDMA_CMD_RWTTYPE <<
> FSL_QDMA_CMD_RWTTYPE_OFFSET);
> > +	ddf->cmd = cpu_to_le32(
> > +			FSL_QDMA_CMD_RWTTYPE <<
> FSL_QDMA_CMD_RWTTYPE_OFFSET);
> > +	ddf->cmd |= cpu_to_le32(
> > +			FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); }
> > +
> > +/*
> > + * Pre-request full command descriptor for enqueue.
> > + */
> > +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue
> > +*queue) {
> > +	struct fsl_qdma_comp *comp_temp;
> > +	int i;
> > +
> > +	for (i = 0; i < queue->n_cq; i++) {
> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > +		if (!comp_temp)
> > +			return -ENOMEM;
> 
> where is the previous allocations freed? Return of this function is not even
> checked??
> 

Sorry, maybe I forgot.

> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > +						      GFP_KERNEL,
> > +						      &comp_temp->bus_addr);
> > +		if (!comp_temp->virt_addr)
> > +			return -ENOMEM;
> 
> and here too

okay

> > +		list_add_tail(&comp_temp->list, &queue->comp_free);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +/*
> > + * Request a command descriptor for enqueue.
> > + */
> > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > +					struct fsl_qdma_chan *fsl_chan,
> > +					unsigned int dst_nents,
> > +					unsigned int src_nents)
> > +{
> > +	struct fsl_qdma_comp *comp_temp;
> > +	struct fsl_qdma_sg *sg_block;
> > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > +	unsigned long flags;
> > +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total,
> > +i;
> > +
> > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > +	if (list_empty(&queue->comp_free)) {
> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > +		if (!comp_temp)
> > +			return NULL;
> > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > +						      GFP_KERNEL,
> > +						      &comp_temp->bus_addr);
> > +		if (!comp_temp->virt_addr) {
> > +			kfree(comp_temp);
> > +			return NULL;
> > +		}
> > +
> > +	} else {
> > +		comp_temp = list_first_entry(&queue->comp_free,
> > +					     struct fsl_qdma_comp,
> > +					     list);
> > +		list_del(&comp_temp->list);
> > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > +	}
> > +
> > +	if (dst_nents != 0)
> > +		dst_sg_entry_block = dst_nents /
> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> 
> DIV_ROUND_UP()?
> 

The DIV_ROUND_UP() definition see below:

#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))

But the code here computes 'n / (d - 1) + 1', which is not the same expression?

> > +	else
> > +		dst_sg_entry_block = 0;
> > +
> > +	if (src_nents != 0)
> > +		src_sg_entry_block = src_nents /
> > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > +	else
> > +		src_sg_entry_block = 0;
> > +
> > +	sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
> > +	if (sg_entry_total) {
> > +		sg_block = kzalloc(sizeof(*sg_block) *
> > +					      sg_entry_total,
> > +					      GFP_KERNEL);
> 
> kcalloc?
> 

All right, replace.

> > +		if (!sg_block) {
> > +			dma_pool_free(queue->comp_pool,
> > +					comp_temp->virt_addr,
> > +					comp_temp->bus_addr);
> > +			return NULL;
> > +		}
> > +		comp_temp->sg_block = sg_block;
> > +		for (i = 0; i < sg_entry_total; i++) {
> > +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
> > +							GFP_KERNEL,
> > +							&sg_block->bus_addr);
> 
> no check if this succeeded?
> 

Sorry, this will be fixed in the next version.

> > +			memset(sg_block->virt_addr, 0,
> > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> 
> why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> allocated?
> 

see line 497.
The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16.

> > +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
> > +					struct platform_device *pdev,
> > +					unsigned int queue_num)
> > +{
> > +	struct fsl_qdma_queue *queue_head, *queue_temp;
> > +	int ret, len, i;
> > +	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
> > +
> > +	if (queue_num > FSL_QDMA_QUEUE_MAX)
> > +		queue_num = FSL_QDMA_QUEUE_MAX;
> > +	len = sizeof(*queue_head) * queue_num;
> > +	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
> > +	if (!queue_head)
> > +		return NULL;
> > +
> > +	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
> > +					queue_size, queue_num);
> > +	if (ret) {
> > +		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
> > +		return NULL;
> > +	}
> > +
> > +	for (i = 0; i < queue_num; i++) {
> > +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
> > +			|| queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
> > +			dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
> > +			return NULL;
> > +		}
> > +		queue_temp = queue_head + i;
> > +		queue_temp->cq = dma_alloc_coherent(&pdev->dev,
> > +						sizeof(struct fsl_qdma_format) *
> > +						queue_size[i],
> > +						&queue_temp->bus_addr,
> > +						GFP_KERNEL);
> > +		if (!queue_temp->cq)
> > +			return NULL;
> > +		queue_temp->n_cq = queue_size[i];
> > +		queue_temp->id = i;
> > +		queue_temp->virt_head = queue_temp->cq;
> > +		queue_temp->virt_tail = queue_temp->cq;
> > +		/*
> > +		 * The dma pool for queue command buffer
> > +		 */
> > +		queue_temp->comp_pool = dma_pool_create("comp_pool",
> > +						&pdev->dev,
> > +						FSL_QDMA_BASE_BUFFER_SIZE,
> > +						16, 0);
> > +		if (!queue_temp->comp_pool)
> > +			goto err_free_comp;
> > +
> > +		/*
> > +		 * The dma pool for queue command buffer
> 
> same comment as prev?
> 

Okay, the second comment should be removed.

> > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine
> > +*fsl_qdma) {
> > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > +	struct fsl_qdma_queue *temp_queue;
> > +	struct fsl_qdma_comp *fsl_comp;
> > +	struct fsl_qdma_format *status_addr;
> > +	struct fsl_qdma_format *csgf_src;
> > +	void __iomem *block = fsl_qdma->block_base;
> > +	u32 reg, i;
> > +	bool duplicate, duplicate_handle;
> > +
> > +	while (1) {
> > +		duplicate = 0;
> > +		duplicate_handle = 0;
> > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > +		if (reg & FSL_QDMA_BSQSR_QE)
> > +			return 0;
> > +		status_addr = fsl_status->virt_head;
> > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > +			duplicate = 1;
> > +		i = qdma_ccdf_get_queue(status_addr);
> > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > +		temp_queue = fsl_queue + i;
> > +		spin_lock(&temp_queue->queue_lock);
> > +		if (list_empty(&temp_queue->comp_used)) {
> > +			if (duplicate)
> > +				duplicate_handle = 1;
> > +			else {
> > +				spin_unlock(&temp_queue->queue_lock);
> > +				return -1;
> 
> -1? really. You are in while(1) wouldn't break make sense here?
> 

Do you mean: use break instead?

> > +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
> > +		dma_cookie_t cookie, struct dma_tx_state *txstate) {
> > +	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
> 
> why do you need this here, its unused
> 

Okay, I will remove it.

> --
> ~Vinod
--
Best Regards,
Wen

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-17 11:27 ` [v4 2/6] " Wen He
@ 2018-05-18  4:21 ` Vinod
  -1 siblings, 0 replies; 32+ messages in thread
From: Vinod Koul @ 2018-05-18  4:21 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

On 17-05-18, 11:27, Wen He wrote:
> > > +
> > > +/* Registers for bit and genmask */
> > > +#define FSL_QDMA_CQIDR_SQT		0x8000
> > 
> > BIT() ?
> 
> Sorry, Maybe I should replace 0x8000 to BIT(15).

yes please

> > > +u64 pre_addr, pre_queue;
> > 
> > why do we have a global?
> 
> Let's us see qDMA that how is works?
> 
> First, the status notification for DMA jobs are reported back to the status queue.
> Status information is carried within the command descriptor status/command field,
> bits 120-127. The command descriptor dequeue pointer advances only after the
> transaction has completed and the status information field has been updated.
> 
> Then, the command descriptor address field wiil pointer to the command descriptor in
> its original format. It is the responsibity of the address of the status queue consumer
> to deallocate buffers as needed when the command descriptor address pointer is non-zero.
> 
> More details of the Status Queue can be found in QorIQ Layerscape Soc datasheet.
> 
> So, these variable is used to record latest value that command descriptor queue
> and status field.
> 
> Every time variables value is zero when set these variable to local, that's not what I want.

Why not store them in driver context?

> > > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> > > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > > +	struct fsl_qdma_sdf *sdf;
> > > +	struct fsl_qdma_ddf *ddf;
> > > +
> > > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> > 
> > Cast are not required to/away from void
> > 
> 
> Does means: remove force conver?

yes and it would work

> > > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > > +					struct fsl_qdma_chan *fsl_chan,
> > > +					unsigned int dst_nents,
> > > +					unsigned int src_nents)
> > > +{
> > > +	struct fsl_qdma_comp *comp_temp;
> > > +	struct fsl_qdma_sg *sg_block;
> > > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > > +	unsigned long flags;
> > > +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total,
> > > +i;
> > > +
> > > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > > +	if (list_empty(&queue->comp_free)) {
> > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > > +		if (!comp_temp)
> > > +			return NULL;
> > > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > > +						      GFP_KERNEL,
> > > +						      &comp_temp->bus_addr);
> > > +		if (!comp_temp->virt_addr) {
> > > +			kfree(comp_temp);
> > > +			return NULL;
> > > +		}
> > > +
> > > +	} else {
> > > +		comp_temp = list_first_entry(&queue->comp_free,
> > > +					     struct fsl_qdma_comp,
> > > +					     list);
> > > +		list_del(&comp_temp->list);
> > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > +	}
> > > +
> > > +	if (dst_nents != 0)
> > > +		dst_sg_entry_block = dst_nents /
> > > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > 
> > DIV_ROUND_UP()?
> > 
> 
> The DIV_ROUND_UP() definition see below:
> 
> #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
> #define __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))
> 
> But here is 'd / (n - 1) + 1' ?

Yeah this doesn't look apt here, check if any other macros suits...

> > > +			memset(sg_block->virt_addr, 0,
> > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > 
> > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > allocated?
> > 
> 
> see line 497.
> The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16.

Please document this

> > > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine
> > > +*fsl_qdma) {
> > > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > > +	struct fsl_qdma_queue *temp_queue;
> > > +	struct fsl_qdma_comp *fsl_comp;
> > > +	struct fsl_qdma_format *status_addr;
> > > +	struct fsl_qdma_format *csgf_src;
> > > +	void __iomem *block = fsl_qdma->block_base;
> > > +	u32 reg, i;
> > > +	bool duplicate, duplicate_handle;
> > > +
> > > +	while (1) {
> > > +		duplicate = 0;
> > > +		duplicate_handle = 0;
> > > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > > +		if (reg & FSL_QDMA_BSQSR_QE)
> > > +			return 0;
> > > +		status_addr = fsl_status->virt_head;
> > > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > > +			duplicate = 1;
> > > +		i = qdma_ccdf_get_queue(status_addr);
> > > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > > +		temp_queue = fsl_queue + i;
> > > +		spin_lock(&temp_queue->queue_lock);
> > > +		if (list_empty(&temp_queue->comp_used)) {
> > > +			if (duplicate)
> > > +				duplicate_handle = 1;
> > > +			else {
> > > +				spin_unlock(&temp_queue->queue_lock);
> > > +				return -1;
> > 
> > -1? really. You are in while(1) wouldn't break make sense here?
> > 
> 
> Does means: using break?

it means two things, we don't use return -1, if it is valid error then return
a proper kernel error code
second, since it is a while loop, do you want to use a break?

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-18  4:21 ` Vinod
  0 siblings, 0 replies; 32+ messages in thread
From: Vinod @ 2018-05-18  4:21 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

On 17-05-18, 11:27, Wen He wrote:
> > > +
> > > +/* Registers for bit and genmask */
> > > +#define FSL_QDMA_CQIDR_SQT		0x8000
> > 
> > BIT() ?
> 
> Sorry, Maybe I should replace 0x8000 to BIT(15).

yes please

> > > +u64 pre_addr, pre_queue;
> > 
> > why do we have a global?
> 
> Let's us see qDMA that how is works?
> 
> First, the status notification for DMA jobs are reported back to the status queue.
> Status information is carried within the command descriptor status/command field,
> bits 120-127. The command descriptor dequeue pointer advances only after the
> transaction has completed and the status information field has been updated.
> 
> Then, the command descriptor address field wiil pointer to the command descriptor in
> its original format. It is the responsibity of the address of the status queue consumer
> to deallocate buffers as needed when the command descriptor address pointer is non-zero.
> 
> More details of the Status Queue can be found in QorIQ Layerscape Soc datasheet.
> 
> So, these variable is used to record latest value that command descriptor queue
> and status field.
> 
> Every time variables value is zero when set these variable to local, that's not what I want.

Why not store them in driver context?

> > > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
> > > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > > +	struct fsl_qdma_sdf *sdf;
> > > +	struct fsl_qdma_ddf *ddf;
> > > +
> > > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> > 
> > Cast are not required to/away from void
> > 
> 
> Does means: remove force conver?

yes and it would work

> > > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > > +					struct fsl_qdma_chan *fsl_chan,
> > > +					unsigned int dst_nents,
> > > +					unsigned int src_nents)
> > > +{
> > > +	struct fsl_qdma_comp *comp_temp;
> > > +	struct fsl_qdma_sg *sg_block;
> > > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > > +	unsigned long flags;
> > > +	unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total,
> > > +i;
> > > +
> > > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > > +	if (list_empty(&queue->comp_free)) {
> > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > > +		if (!comp_temp)
> > > +			return NULL;
> > > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > > +						      GFP_KERNEL,
> > > +						      &comp_temp->bus_addr);
> > > +		if (!comp_temp->virt_addr) {
> > > +			kfree(comp_temp);
> > > +			return NULL;
> > > +		}
> > > +
> > > +	} else {
> > > +		comp_temp = list_first_entry(&queue->comp_free,
> > > +					     struct fsl_qdma_comp,
> > > +					     list);
> > > +		list_del(&comp_temp->list);
> > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > +	}
> > > +
> > > +	if (dst_nents != 0)
> > > +		dst_sg_entry_block = dst_nents /
> > > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > 
> > DIV_ROUND_UP()?
> > 
> 
> The DIV_ROUND_UP() definition see below:
> 
> #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
> #define __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))
> 
> But here is 'd / (n - 1) + 1' ?

Yeah this doesn't look apt here, check if any other macros suits...

> > > +			memset(sg_block->virt_addr, 0,
> > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > 
> > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > allocated?
> > 
> 
> see line 497.
> The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16.

Please document this

> > > +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine
> > > +*fsl_qdma) {
> > > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > > +	struct fsl_qdma_queue *temp_queue;
> > > +	struct fsl_qdma_comp *fsl_comp;
> > > +	struct fsl_qdma_format *status_addr;
> > > +	struct fsl_qdma_format *csgf_src;
> > > +	void __iomem *block = fsl_qdma->block_base;
> > > +	u32 reg, i;
> > > +	bool duplicate, duplicate_handle;
> > > +
> > > +	while (1) {
> > > +		duplicate = 0;
> > > +		duplicate_handle = 0;
> > > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > > +		if (reg & FSL_QDMA_BSQSR_QE)
> > > +			return 0;
> > > +		status_addr = fsl_status->virt_head;
> > > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > > +			duplicate = 1;
> > > +		i = qdma_ccdf_get_queue(status_addr);
> > > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > > +		temp_queue = fsl_queue + i;
> > > +		spin_lock(&temp_queue->queue_lock);
> > > +		if (list_empty(&temp_queue->comp_used)) {
> > > +			if (duplicate)
> > > +				duplicate_handle = 1;
> > > +			else {
> > > +				spin_unlock(&temp_queue->queue_lock);
> > > +				return -1;
> > 
> > -1? really. You are in while(1) wouldn't break make sense here?
> > 
> 
> Does means: using break?

it means two things, we don't use return -1, if it is valid error then return
a proper kernel error code
second, since it is a while loop, do you want to use a break?
 
-- 
~Vinod

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-18  4:21 ` [v4 2/6] " Vinod
@ 2018-05-18 10:04 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-18 10:04 UTC (permalink / raw)
  To: Vinod; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

> -----Original Message-----
> From: dmaengine-owner@vger.kernel.org
> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod
> Sent: 2018年5月18日 12:21
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 17-05-18, 11:27, Wen He wrote:
> > > > +
> > > > +/* Registers for bit and genmask */
> > > > +#define FSL_QDMA_CQIDR_SQT		0x8000
> > >
> > > BIT() ?
> >
> > Sorry, Maybe I should replace 0x8000 to BIT(15).
> 
> yes please
> 
> > > > +u64 pre_addr, pre_queue;
> > >
> > > why do we have a global?
> >
> > Let's us see qDMA that how is works?
> >
> > First, the status notification for DMA jobs are reported back to the status
> queue.
> > Status information is carried within the command descriptor
> > status/command field, bits 120-127. The command descriptor dequeue
> > pointer advances only after the transaction has completed and the status
> information field has been updated.
> >
> > Then, the command descriptor address field wiil pointer to the command
> > descriptor in its original format. It is the responsibity of the
> > address of the status queue consumer to deallocate buffers as needed when
> the command descriptor address pointer is non-zero.
> >
> > More details of the Status Queue can be found in QorIQ Layerscape Soc
> datasheet.
> >
> > So, these variable is used to record latest value that command
> > descriptor queue and status field.
> >
> > Every time variables value is zero when set these variable to local, that's not
> what I want.
> 
> Why not store them in driver context?
> 

Okay, maybe I should move these variables into a driver-private struct?

> > > > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp
> *fsl_comp,
> > > > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > > > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > > > +	struct fsl_qdma_sdf *sdf;
> > > > +	struct fsl_qdma_ddf *ddf;
> > > > +
> > > > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> > >
> > > Cast are not required to/away from void
> > >
> >
> > Does means: remove force conver?
> 
> yes and it would work
> 
> > > > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > > > +					struct fsl_qdma_chan *fsl_chan,
> > > > +					unsigned int dst_nents,
> > > > +					unsigned int src_nents)
> > > > +{
> > > > +	struct fsl_qdma_comp *comp_temp;
> > > > +	struct fsl_qdma_sg *sg_block;
> > > > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > > > +	unsigned long flags;
> > > > +	unsigned int dst_sg_entry_block, src_sg_entry_block,
> > > > +sg_entry_total, i;
> > > > +
> > > > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > > > +	if (list_empty(&queue->comp_free)) {
> > > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > > > +		if (!comp_temp)
> > > > +			return NULL;
> > > > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > > > +						      GFP_KERNEL,
> > > > +						      &comp_temp->bus_addr);
> > > > +		if (!comp_temp->virt_addr) {
> > > > +			kfree(comp_temp);
> > > > +			return NULL;
> > > > +		}
> > > > +
> > > > +	} else {
> > > > +		comp_temp = list_first_entry(&queue->comp_free,
> > > > +					     struct fsl_qdma_comp,
> > > > +					     list);
> > > > +		list_del(&comp_temp->list);
> > > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > > +	}
> > > > +
> > > > +	if (dst_nents != 0)
> > > > +		dst_sg_entry_block = dst_nents /
> > > > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > >
> > > DIV_ROUND_UP()?
> > >
> >
> > The DIV_ROUND_UP() definition see below:
> >
> > #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define
> > __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))
> >
> > But here is 'd / (n - 1) + 1' ?
> 
> Yeah this doesn't look apt here, check if any other macros suits...
> 

Got it.

> > > > +			memset(sg_block->virt_addr, 0,
> > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > >
> > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> allocated?
> > >
> >
> > see line 497.
> > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> 16.
> 
> Please document this
> 
Do you mean I should add a comment documenting this?

> > > > +static int fsl_qdma_queue_transfer_complete(struct
> > > > +fsl_qdma_engine
> > > > +*fsl_qdma) {
> > > > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > > > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > > > +	struct fsl_qdma_queue *temp_queue;
> > > > +	struct fsl_qdma_comp *fsl_comp;
> > > > +	struct fsl_qdma_format *status_addr;
> > > > +	struct fsl_qdma_format *csgf_src;
> > > > +	void __iomem *block = fsl_qdma->block_base;
> > > > +	u32 reg, i;
> > > > +	bool duplicate, duplicate_handle;
> > > > +
> > > > +	while (1) {
> > > > +		duplicate = 0;
> > > > +		duplicate_handle = 0;
> > > > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > > > +		if (reg & FSL_QDMA_BSQSR_QE)
> > > > +			return 0;
> > > > +		status_addr = fsl_status->virt_head;
> > > > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > > > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > > > +			duplicate = 1;
> > > > +		i = qdma_ccdf_get_queue(status_addr);
> > > > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > > > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > > > +		temp_queue = fsl_queue + i;
> > > > +		spin_lock(&temp_queue->queue_lock);
> > > > +		if (list_empty(&temp_queue->comp_used)) {
> > > > +			if (duplicate)
> > > > +				duplicate_handle = 1;
> > > > +			else {
> > > > +				spin_unlock(&temp_queue->queue_lock);
> > > > +				return -1;
> > >
> > > -1? really. You are in while(1) wouldn't break make sense here?
> > >
> >
> > Does means: using break?
> 
> it means two things, we don't use return -1, if it is valid error then return a
> proper kernel error code second, since it is a while loop, do you want to use a
> break?
> 
Got it, Thanks.

> --
> ~Vinod
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the
> body of a message to majordomo@vger.kernel.org More majordomo info at
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger.
> kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp.co
> m%7Cedcdbb818c534ec70b3108d5bc76d7b3%7C686ea1d3bc2b4c6fa92cd9
> 9c5c301635%7C0%7C0%7C636622140978593180&sdata=TuE40l7HejTz%2Bq
> S8LibIkdYV84Esh5sfD1jSIzvAwTY%3D&reserved=0

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-18 10:04 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-18 10:04 UTC (permalink / raw)
  To: Vinod; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan



> -----Original Message-----
> From: dmaengine-owner@vger.kernel.org
> [mailto:dmaengine-owner@vger.kernel.org] On Behalf Of Vinod
> Sent: 2018年5月18日 12:21
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 17-05-18, 11:27, Wen He wrote:
> > > > +
> > > > +/* Registers for bit and genmask */
> > > > +#define FSL_QDMA_CQIDR_SQT		0x8000
> > >
> > > BIT() ?
> >
> > Sorry, Maybe I should replace 0x8000 to BIT(15).
> 
> yes please
> 
> > > > +u64 pre_addr, pre_queue;
> > >
> > > why do we have a global?
> >
> > Let's see how qDMA works.
> >
> > First, the status notification for DMA jobs are reported back to the status
> queue.
> > Status information is carried within the command descriptor
> > status/command field, bits 120-127. The command descriptor dequeue
> > pointer advances only after the transaction has completed and the status
> information field has been updated.
> >
> > Then, the command descriptor address field will point to the command
> > descriptor in its original format. It is the responsibility of the
> > status queue consumer to deallocate buffers as needed when
> the command descriptor address pointer is non-zero.
> >
> > More details of the Status Queue can be found in QorIQ Layerscape Soc
> datasheet.
> >
> > So, these variables are used to record the latest values of the command
> > descriptor queue and status fields.
> >
> > If these variables were made local, their values would be zero on every call; that's not
> what I want.
> 
> Why not store them in driver context?
> 

okay, maybe I should move these variables into a private struct?

> > > > +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp
> *fsl_comp,
> > > > +					dma_addr_t dst, dma_addr_t src, u32 len) {
> > > > +	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
> > > > +	struct fsl_qdma_sdf *sdf;
> > > > +	struct fsl_qdma_ddf *ddf;
> > > > +
> > > > +	ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
> > >
> > > Cast are not required to/away from void
> > >
> >
> > Do you mean: remove the forced conversion?
> 
> yes and it would work
> 
> > > > +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
> > > > +					struct fsl_qdma_chan *fsl_chan,
> > > > +					unsigned int dst_nents,
> > > > +					unsigned int src_nents)
> > > > +{
> > > > +	struct fsl_qdma_comp *comp_temp;
> > > > +	struct fsl_qdma_sg *sg_block;
> > > > +	struct fsl_qdma_queue *queue = fsl_chan->queue;
> > > > +	unsigned long flags;
> > > > +	unsigned int dst_sg_entry_block, src_sg_entry_block,
> > > > +sg_entry_total, i;
> > > > +
> > > > +	spin_lock_irqsave(&queue->queue_lock, flags);
> > > > +	if (list_empty(&queue->comp_free)) {
> > > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > > +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
> > > > +		if (!comp_temp)
> > > > +			return NULL;
> > > > +		comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
> > > > +						      GFP_KERNEL,
> > > > +						      &comp_temp->bus_addr);
> > > > +		if (!comp_temp->virt_addr) {
> > > > +			kfree(comp_temp);
> > > > +			return NULL;
> > > > +		}
> > > > +
> > > > +	} else {
> > > > +		comp_temp = list_first_entry(&queue->comp_free,
> > > > +					     struct fsl_qdma_comp,
> > > > +					     list);
> > > > +		list_del(&comp_temp->list);
> > > > +		spin_unlock_irqrestore(&queue->queue_lock, flags);
> > > > +	}
> > > > +
> > > > +	if (dst_nents != 0)
> > > > +		dst_sg_entry_block = dst_nents /
> > > > +					(FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
> > >
> > > DIV_ROUND_UP()?
> > >
> >
> > The DIV_ROUND_UP() definition see below:
> >
> > #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define
> > __KERNEL_DIV_ROUND_UP(n ,d) (((n) + (d) - 1) / (d))
> >
> > But here is 'd / (n - 1) + 1' ?
> 
> Yeah this doesn't look apt here, check if any other macros suits...
> 

Got it.

> > > > +			memset(sg_block->virt_addr, 0,
> > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > >
> > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> allocated?
> > >
> >
> > see line 497.
> > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> 16.
> 
> Please document this
> 
Add comment to this?

> > > > +static int fsl_qdma_queue_transfer_complete(struct
> > > > +fsl_qdma_engine
> > > > +*fsl_qdma) {
> > > > +	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
> > > > +	struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
> > > > +	struct fsl_qdma_queue *temp_queue;
> > > > +	struct fsl_qdma_comp *fsl_comp;
> > > > +	struct fsl_qdma_format *status_addr;
> > > > +	struct fsl_qdma_format *csgf_src;
> > > > +	void __iomem *block = fsl_qdma->block_base;
> > > > +	u32 reg, i;
> > > > +	bool duplicate, duplicate_handle;
> > > > +
> > > > +	while (1) {
> > > > +		duplicate = 0;
> > > > +		duplicate_handle = 0;
> > > > +		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
> > > > +		if (reg & FSL_QDMA_BSQSR_QE)
> > > > +			return 0;
> > > > +		status_addr = fsl_status->virt_head;
> > > > +		if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
> > > > +			qdma_ccdf_addr_get64(status_addr) == pre_addr)
> > > > +			duplicate = 1;
> > > > +		i = qdma_ccdf_get_queue(status_addr);
> > > > +		pre_queue = qdma_ccdf_get_queue(status_addr);
> > > > +		pre_addr = qdma_ccdf_addr_get64(status_addr);
> > > > +		temp_queue = fsl_queue + i;
> > > > +		spin_lock(&temp_queue->queue_lock);
> > > > +		if (list_empty(&temp_queue->comp_used)) {
> > > > +			if (duplicate)
> > > > +				duplicate_handle = 1;
> > > > +			else {
> > > > +				spin_unlock(&temp_queue->queue_lock);
> > > > +				return -1;
> > >
> > > -1? really. You are in while(1) wouldn't break make sense here?
> > >
> >
> > Do you mean: use break?
> 
> it means two things, we don't use return -1, if it is valid error then return a
> proper kernel error code second, since it is a while loop, do you want to use a
> break?
> 
Got it, Thanks.

> --
> ~Vinod
> --
> To unsubscribe from this list: send the line "unsubscribe dmaengine" in the
> body of a message to majordomo@vger.kernel.org More majordomo info at
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger.
> kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp.co
> m%7Cedcdbb818c534ec70b3108d5bc76d7b3%7C686ea1d3bc2b4c6fa92cd9
> 9c5c301635%7C0%7C0%7C636622140978593180&sdata=TuE40l7HejTz%2Bq
> S8LibIkdYV84Esh5sfD1jSIzvAwTY%3D&reserved=0

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
  2018-05-14 12:03 ` [v4 3/6] " Wen He
@ 2018-05-18 21:26 ` Rob Herring
  -1 siblings, 0 replies; 32+ messages in thread
From: Rob Herring @ 2018-05-18 21:26 UTC (permalink / raw)
  To: Wen He
  Cc: vinod.koul, dmaengine, devicetree, leoyang.li, jiafei.pan, jiaheng.fan

On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> Document the devicetree bindings for NXP Layerscape qDMA controller
> which could be found on NXP QorIQ Layerscape SoCs.
> 
> Signed-off-by: Wen He <wen.he_1@nxp.com>
> ---
> change in v4:
> 	- Rewrite the bindings document that follows generic DMA bindings file
> 
> change in v3:
> 	- no change
> 
> change in v2:
> 	- Remove indentation
> 	- Add "Should be" before 'fsl,ls1021a-qdma'
> 	- Replace 'channels' by 'dma-channels'
> 	- Replace 'qdma@8390000' by 'dma-controller@8390000'
> 
>  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41 ++++++++++++++++++++
>  1 files changed, 41 insertions(+), 0 deletions(-)
>  create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
> 
> diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> new file mode 100644
> index 0000000..368c4e7
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> @@ -0,0 +1,41 @@
> +NXP Layerscape SoC qDMA Controller
> +==================================
> +
> +This device follows the generic DMA bindings defined in dma/dma.txt.
> +
> +Required properties:
> +
> +- compatible:		Must be one of
> +			 "fsl,ls1021a-qdma": for LS1021A Board
> +			 "fsl,ls1043a-qdma": for ls1043A Board
> +			 "fsl,ls1046a-qdma": for ls1046A Board
> +- reg:			Should contain the register's base address and length.
> +- interrupts:		Should contain a reference to the interrupt used by this
> +			device.
> +- interrupt-names:	Should contain interrupt names:
> +			 "qdma-error": the error interrupt
> +			 "qdma-queue": the queue interrupt
> +- queues:		Should contain number of queues supported.

Needs a vendor prefix.

> +
> +Optional properties:
> +
> +- dma-channels:		Number of DMA channels supported by the controller.
> +- big-endian:		If present registers and hardware scatter/gather descriptors
> +			of the qDMA are implemented in big endian mode, otherwise in little
> +			mode.
> +
> +Examples:
> +
> +	qdma: dma-controller@8390000 {
> +		compatible = "fsl,ls1021a-qdma";
> +		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
> +		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
> +		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
> +				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
> +		interrupt-names = "qdma-error", "qdma-queue";
> +		dma-channels = <8>;
> +		queues = <2>;
> +		big-endian;
> +	};
> +
> +DMA clients must use the format described in dma/dma.txt file.
> -- 
> 1.7.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe devicetree" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
---
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
@ 2018-05-18 21:26 ` Rob Herring
  0 siblings, 0 replies; 32+ messages in thread
From: Rob Herring @ 2018-05-18 21:26 UTC (permalink / raw)
  To: Wen He
  Cc: vinod.koul, dmaengine, devicetree, leoyang.li, jiafei.pan, jiaheng.fan

On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> Document the devicetree bindings for NXP Layerscape qDMA controller
> which could be found on NXP QorIQ Layerscape SoCs.
> 
> Signed-off-by: Wen He <wen.he_1@nxp.com>
> ---
> change in v4:
> 	- Rewrite the bindings document that follows generic DMA bindings file
> 
> change in v3:
> 	- no change
> 
> change in v2:
> 	- Remove indentation
> 	- Add "Should be" before 'fsl,ls1021a-qdma'
> 	- Replace 'channels' by 'dma-channels'
> 	- Replace 'qdma@8390000' by 'dma-controller@8390000'
> 
>  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41 ++++++++++++++++++++
>  1 files changed, 41 insertions(+), 0 deletions(-)
>  create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
> 
> diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> new file mode 100644
> index 0000000..368c4e7
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> @@ -0,0 +1,41 @@
> +NXP Layerscape SoC qDMA Controller
> +==================================
> +
> +This device follows the generic DMA bindings defined in dma/dma.txt.
> +
> +Required properties:
> +
> +- compatible:		Must be one of
> +			 "fsl,ls1021a-qdma": for LS1021A Board
> +			 "fsl,ls1043a-qdma": for ls1043A Board
> +			 "fsl,ls1046a-qdma": for ls1046A Board
> +- reg:			Should contain the register's base address and length.
> +- interrupts:		Should contain a reference to the interrupt used by this
> +			device.
> +- interrupt-names:	Should contain interrupt names:
> +			 "qdma-error": the error interrupt
> +			 "qdma-queue": the queue interrupt
> +- queues:		Should contain number of queues supported.

Needs a vendor prefix.

> +
> +Optional properties:
> +
> +- dma-channels:		Number of DMA channels supported by the controller.
> +- big-endian:		If present registers and hardware scatter/gather descriptors
> +			of the qDMA are implemented in big endian mode, otherwise in little
> +			mode.
> +
> +Examples:
> +
> +	qdma: dma-controller@8390000 {
> +		compatible = "fsl,ls1021a-qdma";
> +		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
> +		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
> +		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
> +				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
> +		interrupt-names = "qdma-error", "qdma-queue";
> +		dma-channels = <8>;
> +		queues = <2>;
> +		big-endian;
> +	};
> +
> +DMA clients must use the format described in dma/dma.txt file.
> -- 
> 1.7.1
> 
> --
> To unsubscribe from this list: send the line "unsubscribe devicetree" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
  2018-05-18 21:26 ` [v4 3/6] " Rob Herring
@ 2018-05-21  5:52 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-21  5:52 UTC (permalink / raw)
  To: Rob Herring
  Cc: vinod.koul, dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

Hi Rob,

Please see my comments inline.

Best Regards,
Wen

> -----Original Message-----
> From: Rob Herring [mailto:robh@kernel.org]
> Sent: 2018年5月19日 5:26
> To: Wen He <wen.he_1@nxp.com>
> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> controller bindings
> 
> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> > Document the devicetree bindings for NXP Layerscape qDMA controller
> > which could be found on NXP QorIQ Layerscape SoCs.
> >
> > Signed-off-by: Wen He <wen.he_1@nxp.com>
> > ---
> > change in v4:
> > 	- Rewrite the bindings document that follows generic DMA bindings
> > file
> >
> > change in v3:
> > 	- no change
> >
> > change in v2:
> > 	- Remove indentation
> > 	- Add "Should be" before 'fsl,ls1021a-qdma'
> > 	- Replace 'channels' by 'dma-channels'
> > 	- Replace 'qdma@8390000' by 'dma-controller@8390000'
> >
> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
> ++++++++++++++++++++
> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode 100644
> > Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >
> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > new file mode 100644
> > index 0000000..368c4e7
> > --- /dev/null
> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > @@ -0,0 +1,41 @@
> > +NXP Layerscape SoC qDMA Controller
> > +==================================
> > +
> > +This device follows the generic DMA bindings defined in dma/dma.txt.
> > +
> > +Required properties:
> > +
> > +- compatible:		Must be one of
> > +			 "fsl,ls1021a-qdma": for LS1021A Board
> > +			 "fsl,ls1043a-qdma": for ls1043A Board
> > +			 "fsl,ls1046a-qdma": for ls1046A Board
> > +- reg:			Should contain the register's base address and length.
> > +- interrupts:		Should contain a reference to the interrupt used by
> this
> > +			device.
> > +- interrupt-names:	Should contain interrupt names:
> > +			 "qdma-error": the error interrupt
> > +			 "qdma-queue": the queue interrupt
> > +- queues:		Should contain number of queues supported.
> 
> Needs a vendor prefix.
> 

Do you mean: the 'queues' property needs a vendor prefix?
like 'fsl-queues'? right?

> > +
> > +Optional properties:
> > +
> > +- dma-channels:		Number of DMA channels supported by the
> controller.
> > +- big-endian:		If present registers and hardware scatter/gather
> descriptors
> > +			of the qDMA are implemented in big endian mode, otherwise in
> little
> > +			mode.
> > +
> > +Examples:
> > +
> > +	qdma: dma-controller@8390000 {
> > +		compatible = "fsl,ls1021a-qdma";
> > +		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
> > +		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
> > +		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
> > +				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
> > +		interrupt-names = "qdma-error", "qdma-queue";
> > +		dma-channels = <8>;
> > +		queues = <2>;
> > +		big-endian;
> > +	};
> > +
> > +DMA clients must use the format described in dma/dma.txt file.
> > --
> > 1.7.1
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe devicetree"
> > in the body of a message to majordomo@vger.kernel.org More majordomo
> > info at
> >
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger
> > .kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp
> .com%7C
> >
> afe9f30f68654b36085408d5bd05f856%7C686ea1d3bc2b4c6fa92cd99c5c301
> 635%7C
> >
> 0%7C1%7C636622755700659994&sdata=W65hD8ZYUQm2%2F8TdfiUGorgB
> Om8GojXdES2
> > mVNzQpIE%3D&reserved=0

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
@ 2018-05-21  5:52 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-21  5:52 UTC (permalink / raw)
  To: Rob Herring
  Cc: vinod.koul, dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

Hi Rob,

Please see my comments inline.

Best Regards,
Wen

> -----Original Message-----
> From: Rob Herring [mailto:robh@kernel.org]
> Sent: 2018年5月19日 5:26
> To: Wen He <wen.he_1@nxp.com>
> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> controller bindings
> 
> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> > Document the devicetree bindings for NXP Layerscape qDMA controller
> > which could be found on NXP QorIQ Layerscape SoCs.
> >
> > Signed-off-by: Wen He <wen.he_1@nxp.com>
> > ---
> > change in v4:
> > 	- Rewrite the bindings document that follows generic DMA bindings
> > file
> >
> > change in v3:
> > 	- no change
> >
> > change in v2:
> > 	- Remove indentation
> > 	- Add "Should be" before 'fsl,ls1021a-qdma'
> > 	- Replace 'channels' by 'dma-channels'
> > 	- Replace 'qdma@8390000' by 'dma-controller@8390000'
> >
> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
> ++++++++++++++++++++
> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode 100644
> > Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >
> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > new file mode 100644
> > index 0000000..368c4e7
> > --- /dev/null
> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> > @@ -0,0 +1,41 @@
> > +NXP Layerscape SoC qDMA Controller
> > +==================================
> > +
> > +This device follows the generic DMA bindings defined in dma/dma.txt.
> > +
> > +Required properties:
> > +
> > +- compatible:		Must be one of
> > +			 "fsl,ls1021a-qdma": for LS1021A Board
> > +			 "fsl,ls1043a-qdma": for ls1043A Board
> > +			 "fsl,ls1046a-qdma": for ls1046A Board
> > +- reg:			Should contain the register's base address and length.
> > +- interrupts:		Should contain a reference to the interrupt used by
> this
> > +			device.
> > +- interrupt-names:	Should contain interrupt names:
> > +			 "qdma-error": the error interrupt
> > +			 "qdma-queue": the queue interrupt
> > +- queues:		Should contain number of queues supported.
> 
> Needs a vendor prefix.
> 

Do you mean: the 'queues' property needs a vendor prefix?
like 'fsl-queues'? right?

> > +
> > +Optional properties:
> > +
> > +- dma-channels:		Number of DMA channels supported by the
> controller.
> > +- big-endian:		If present registers and hardware scatter/gather
> descriptors
> > +			of the qDMA are implemented in big endian mode, otherwise in
> little
> > +			mode.
> > +
> > +Examples:
> > +
> > +	qdma: dma-controller@8390000 {
> > +		compatible = "fsl,ls1021a-qdma";
> > +		reg = <0x0 0x8398000 0x0 0x2000 /* Controller registers */
> > +		       0x0 0x839a000 0x0 0x2000>; /* Block registers */
> > +		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
> > +				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
> > +		interrupt-names = "qdma-error", "qdma-queue";
> > +		dma-channels = <8>;
> > +		queues = <2>;
> > +		big-endian;
> > +	};
> > +
> > +DMA clients must use the format described in dma/dma.txt file.
> > --
> > 1.7.1
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe devicetree"
> > in the body of a message to majordomo@vger.kernel.org More majordomo
> > info at
> >
> https://emea01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fvger
> > .kernel.org%2Fmajordomo-info.html&data=02%7C01%7Cwen.he_1%40nxp
> .com%7C
> >
> afe9f30f68654b36085408d5bd05f856%7C686ea1d3bc2b4c6fa92cd99c5c301
> 635%7C
> >
> 0%7C1%7C636622755700659994&sdata=W65hD8ZYUQm2%2F8TdfiUGorgB
> Om8GojXdES2
> > mVNzQpIE%3D&reserved=0

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-18 10:04 ` [v4 2/6] " Wen He
@ 2018-05-21  9:09 ` Vinod Koul
  -1 siblings, 0 replies; 32+ messages in thread
From: Vinod Koul @ 2018-05-21  9:09 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

On 18-05-18, 10:04, Wen He wrote:

> > > > > +u64 pre_addr, pre_queue;
> > > >
> > > > why do we have a global?
> > >
> > > Let's see how qDMA works.
> > >
> > > First, the status notification for DMA jobs are reported back to the status
> > queue.
> > > Status information is carried within the command descriptor
> > > status/command field, bits 120-127. The command descriptor dequeue
> > > pointer advances only after the transaction has completed and the status
> > information field has been updated.
> > >
> > > Then, the command descriptor address field will point to the command
> > > descriptor in its original format. It is the responsibility of the
> > > status queue consumer to deallocate buffers as needed when
> > the command descriptor address pointer is non-zero.
> > >
> > > More details of the Status Queue can be found in QorIQ Layerscape Soc
> > datasheet.
> > >
> > > So, these variable is used to record latest value that command
> > > descriptor queue and status field.
> > >
> > > Every time variables value is zero when set these variable to local, that's not
> > what I want.
> > 
> > Why not store them in driver context?
> > 
> 
> okay, maybe I should remove these variable to private struct?

Yes that would be better

> > > > > +			memset(sg_block->virt_addr, 0,
> > > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > > >
> > > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > allocated?
> > > >
> > >
> > > see line 497.
> > > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> > 16.
> > 
> > Please document this
> > 
> Add comment to this?

yup, explaining where 16 is coming from

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-21  9:09 ` Vinod Koul
  0 siblings, 0 replies; 32+ messages in thread
From: Vinod Koul @ 2018-05-21  9:09 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

On 18-05-18, 10:04, Wen He wrote:

> > > > > +u64 pre_addr, pre_queue;
> > > >
> > > > why do we have a global?
> > >
> > > Let's see how qDMA works.
> > >
> > > First, the status notification for DMA jobs are reported back to the status
> > queue.
> > > Status information is carried within the command descriptor
> > > status/command field, bits 120-127. The command descriptor dequeue
> > > pointer advances only after the transaction has completed and the status
> > information field has been updated.
> > >
> > > Then, the command descriptor address field will point to the command
> > > descriptor in its original format. It is the responsibility of the
> > > status queue consumer to deallocate buffers as needed when
> > the command descriptor address pointer is non-zero.
> > >
> > > More details of the Status Queue can be found in QorIQ Layerscape Soc
> > datasheet.
> > >
> > > So, these variable is used to record latest value that command
> > > descriptor queue and status field.
> > >
> > > Every time variables value is zero when set these variable to local, that's not
> > what I want.
> > 
> > Why not store them in driver context?
> > 
> 
> okay, maybe I should remove these variable to private struct?

Yes that would be better

> > > > > +			memset(sg_block->virt_addr, 0,
> > > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
> > > >
> > > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > allocated?
> > > >
> > >
> > > see line 497.
> > > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> > 16.
> > 
> > Please document this
> > 
> Add comment to this?

yup, explaining where 16 is coming from
-- 
~Vinod

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
  2018-05-21  9:09 ` [v4 2/6] " Vinod Koul
@ 2018-05-21  9:49 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-21  9:49 UTC (permalink / raw)
  To: Vinod Koul
  Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan

> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@linaro.org]
> Sent: 2018年5月21日 17:09
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 18-05-18, 10:04, Wen He wrote:
> 
> > > > > > +u64 pre_addr, pre_queue;
> > > > >
> > > > > why do we have a global?
> > > >
> > > > Let's see how qDMA works.
> > > >
> > > > First, the status notification for DMA jobs are reported back to
> > > > the status
> > > queue.
> > > > Status information is carried within the command descriptor
> > > > status/command field, bits 120-127. The command descriptor dequeue
> > > > pointer advances only after the transaction has completed and the
> > > > status
> > > information field has been updated.
> > > >
> > > > Then, the command descriptor address field will point to the
> > > > command descriptor in its original format. It is the responsibility
> > > > of the status queue consumer to deallocate buffers
> > > > as needed when
> > > the command descriptor address pointer is non-zero.
> > > >
> > > > More details of the Status Queue can be found in QorIQ Layerscape
> > > > Soc
> > > datasheet.
> > > >
> > > > So, these variable is used to record latest value that command
> > > > descriptor queue and status field.
> > > >
> > > > Every time variables value is zero when set these variable to
> > > > local, that's not
> > > what I want.
> > >
> > > Why not store them in driver context?
> > >
> >
> > okay, maybe I should remove these variable to private struct?
> 
> Yes that would be better
> 
> > > > > > +			memset(sg_block->virt_addr, 0,
> > > > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> 16);
> > > > >
> > > > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > > allocated?
> > > > >
> > > >
> > > > see line 497.
> > > > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM
> *
> > > 16.
> > >
> > > Please document this
> > >
> > Add comment to this?
> 
> yup, explaining where 16 is coming from

got it, thanks for your review and will be next version fix it.

Best Regards,
Wen

> --
> ~Vinod

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs
@ 2018-05-21  9:49 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-21  9:49 UTC (permalink / raw)
  To: Vinod Koul
  Cc: dmaengine, robh+dt, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan



> -----Original Message-----
> From: Vinod Koul [mailto:vinod.koul@linaro.org]
> Sent: 2018年5月21日 17:09
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; robh+dt@kernel.org;
> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> Subject: Re: [v4 2/6] dmaengine: fsl-qdma: Add qDMA controller driver for
> Layerscape SoCs
> 
> On 18-05-18, 10:04, Wen He wrote:
> 
> > > > > > +u64 pre_addr, pre_queue;
> > > > >
> > > > > why do we have a global?
> > > >
> > > > Let's see how qDMA works.
> > > >
> > > > First, the status notification for DMA jobs are reported back to
> > > > the status
> > > queue.
> > > > Status information is carried within the command descriptor
> > > > status/command field, bits 120-127. The command descriptor dequeue
> > > > pointer advances only after the transaction has completed and the
> > > > status
> > > information field has been updated.
> > > >
> > > > Then, the command descriptor address field will point to the
> > > > command descriptor in its original format. It is the responsibility
> > > > of the status queue consumer to deallocate buffers
> > > > as needed when
> > > the command descriptor address pointer is non-zero.
> > > >
> > > > More details of the Status Queue can be found in QorIQ Layerscape
> > > > Soc
> > > datasheet.
> > > >
> > > > So, these variable is used to record latest value that command
> > > > descriptor queue and status field.
> > > >
> > > > Every time variables value is zero when set these variable to
> > > > local, that's not
> > > what I want.
> > >
> > > Why not store them in driver context?
> > >
> >
> > okay, maybe I should remove these variable to private struct?
> 
> Yes that would be better
> 
> > > > > > +			memset(sg_block->virt_addr, 0,
> > > > > > +					FSL_QDMA_EXPECT_SG_ENTRY_NUM *
> 16);
> > > > >
> > > > > why FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16? and not what you
> > > allocated?
> > > > >
> > > >
> > > > see line 497.
> > > > The sg_pool buffer size created is FSL_QDMA_EXPECT_SG_ENTRY_NUM
> *
> > > 16.
> > >
> > > Please document this
> > >
> > Add comment to this?
> 
> yup, explaining where 16 is coming from

got it, thanks for your review and will be next version fix it.

Best Regards,
Wen

> --
> ~Vinod

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
  2018-05-21  5:52 ` [v4 3/6] " Wen He
@ 2018-05-23 19:59 ` Rob Herring
  -1 siblings, 0 replies; 32+ messages in thread
From: Rob Herring @ 2018-05-23 19:59 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan, Vinod

Updated Vinod's email...

On Mon, May 21, 2018 at 12:52 AM, Wen He <wen.he_1@nxp.com> wrote:
> Hi Rob,
>
> Please see my comments inline.
>
> Best Regards,
> Wen
>
>> -----Original Message-----
>> From: Rob Herring [mailto:robh@kernel.org]
>> Sent: 2018年5月19日 5:26
>> To: Wen He <wen.he_1@nxp.com>
>> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
>> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
>> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
>> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
>> controller bindings
>>
>> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
>> > Document the devicetree bindings for NXP Layerscape qDMA controller
>> > which could be found on NXP QorIQ Layerscape SoCs.
>> >
>> > Signed-off-by: Wen He <wen.he_1@nxp.com>
>> > ---
>> > change in v4:
>> >     - Rewrite the bindings document that follows generic DMA bindings
>> > file
>> >
>> > change in v3:
>> >     - no change
>> >
>> > change in v2:
>> >     - Remove indentation
>> >     - Add "Should be" before 'fsl,ls1021a-qdma'
>> >     - Replace 'channels' by 'dma-channels'
>> >     - Replace 'qdma@8390000' by 'dma-controller@8390000'
>> >
>> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
>> ++++++++++++++++++++
>> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode 100644
>> > Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> >
>> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > new file mode 100644
>> > index 0000000..368c4e7
>> > --- /dev/null
>> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > @@ -0,0 +1,41 @@
>> > +NXP Layerscape SoC qDMA Controller
>> > +==================================
>> > +
>> > +This device follows the generic DMA bindings defined in dma/dma.txt.
>> > +
>> > +Required properties:
>> > +
>> > +- compatible:              Must be one of
>> > +                    "fsl,ls1021a-qdma": for LS1021A Board
>> > +                    "fsl,ls1043a-qdma": for ls1043A Board
>> > +                    "fsl,ls1046a-qdma": for ls1046A Board
>> > +- reg:                     Should contain the register's base address and length.
>> > +- interrupts:              Should contain a reference to the interrupt used by
>> this
>> > +                   device.
>> > +- interrupt-names: Should contain interrupt names:
>> > +                    "qdma-error": the error interrupt
>> > +                    "qdma-queue": the queue interrupt
>> > +- queues:          Should contain number of queues supported.
>>
>> Needs a vendor prefix.
>>
>
> Does that mean the 'queues' field needs a vendor prefix?
> Like 'fsl-queues'? Right?

No, vendor prefixes end with a comma: fsl,queues

Rob
---
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
@ 2018-05-23 19:59 ` Rob Herring
  0 siblings, 0 replies; 32+ messages in thread
From: Rob Herring @ 2018-05-23 19:59 UTC (permalink / raw)
  To: Wen He; +Cc: dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan, Vinod

Updated Vinod's email...

On Mon, May 21, 2018 at 12:52 AM, Wen He <wen.he_1@nxp.com> wrote:
> Hi Rob,
>
> Please see my comments inline.
>
> Best Regards,
> Wen
>
>> -----Original Message-----
>> From: Rob Herring [mailto:robh@kernel.org]
>> Sent: 2018年5月19日 5:26
>> To: Wen He <wen.he_1@nxp.com>
>> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
>> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
>> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
>> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
>> controller bindings
>>
>> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
>> > Document the devicetree bindings for NXP Layerscape qDMA controller
>> > which could be found on NXP QorIQ Layerscape SoCs.
>> >
>> > Signed-off-by: Wen He <wen.he_1@nxp.com>
>> > ---
>> > change in v4:
>> >     - Rewrite the bindings document that follows generic DMA bindings
>> > file
>> >
>> > change in v3:
>> >     - no change
>> >
>> > change in v2:
>> >     - Remove indentation
>> >     - Add "Should be" before 'fsl,ls1021a-qdma'
>> >     - Replace 'channels' by 'dma-channels'
>> >     - Replace 'qdma@8390000' by 'dma-controller@8390000'
>> >
>> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
>> ++++++++++++++++++++
>> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode 100644
>> > Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> >
>> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > new file mode 100644
>> > index 0000000..368c4e7
>> > --- /dev/null
>> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
>> > @@ -0,0 +1,41 @@
>> > +NXP Layerscape SoC qDMA Controller
>> > +==================================
>> > +
>> > +This device follows the generic DMA bindings defined in dma/dma.txt.
>> > +
>> > +Required properties:
>> > +
>> > +- compatible:              Must be one of
>> > +                    "fsl,ls1021a-qdma": for LS1021A Board
>> > +                    "fsl,ls1043a-qdma": for ls1043A Board
>> > +                    "fsl,ls1046a-qdma": for ls1046A Board
>> > +- reg:                     Should contain the register's base address and length.
>> > +- interrupts:              Should contain a reference to the interrupt used by
>> this
>> > +                   device.
>> > +- interrupt-names: Should contain interrupt names:
>> > +                    "qdma-error": the error interrupt
>> > +                    "qdma-queue": the queue interrupt
>> > +- queues:          Should contain number of queues supported.
>>
>> Needs a vendor prefix.
>>
>
> Does that mean the 'queues' field needs a vendor prefix?
> Like 'fsl-queues'? Right?

No, vendor prefixes end with a comma: fsl,queues

Rob

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
  2018-05-23 19:59 ` [v4 3/6] " Rob Herring
@ 2018-05-24  7:20 ` Wen He
  -1 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-24  7:20 UTC (permalink / raw)
  To: Rob Herring; +Cc: dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan, Vinod

> -----Original Message-----
> From: Rob Herring [mailto:robh@kernel.org]
> Sent: 2018年5月24日 3:59
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; devicetree@vger.kernel.org; Leo Li
> <leoyang.li@nxp.com>; Jiafei Pan <jiafei.pan@nxp.com>; Jiaheng Fan
> <jiaheng.fan@nxp.com>; Vinod <vkoul@kernel.org>
> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> controller bindings
> 
> Updated Vinod's email...
> 
> On Mon, May 21, 2018 at 12:52 AM, Wen He <wen.he_1@nxp.com> wrote:
> > Hi Rob,
> >
> > Please see my comments inline.
> >
> > Best Regards,
> > Wen
> >
> >> -----Original Message-----
> >> From: Rob Herring [mailto:robh@kernel.org]
> >> Sent: 2018年5月19日 5:26
> >> To: Wen He <wen.he_1@nxp.com>
> >> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
> >> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> >> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> >> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> >> controller bindings
> >>
> >> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> >> > Document the devicetree bindings for NXP Layerscape qDMA controller
> >> > which could be found on NXP QorIQ Layerscape SoCs.
> >> >
> >> > Signed-off-by: Wen He <wen.he_1@nxp.com>
> >> > ---
> >> > change in v4:
> >> >     - Rewrite the bindings document that follows generic DMA
> >> > bindings file
> >> >
> >> > change in v3:
> >> >     - no change
> >> >
> >> > change in v2:
> >> >     - Remove indentation
> >> >     - Add "Should be" before 'fsl,ls1021a-qdma'
> >> >     - Replace 'channels' by 'dma-channels'
> >> >     - Replace 'qdma@8390000' by 'dma-controller@8390000'
> >> >
> >> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
> >> ++++++++++++++++++++
> >> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode
> >> > 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> >
> >> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > new file mode 100644
> >> > index 0000000..368c4e7
> >> > --- /dev/null
> >> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > @@ -0,0 +1,41 @@
> >> > +NXP Layerscape SoC qDMA Controller
> >> > +==================================
> >> > +
> >> > +This device follows the generic DMA bindings defined in dma/dma.txt.
> >> > +
> >> > +Required properties:
> >> > +
> >> > +- compatible:              Must be one of
> >> > +                    "fsl,ls1021a-qdma": for LS1021A Board
> >> > +                    "fsl,ls1043a-qdma": for ls1043A Board
> >> > +                    "fsl,ls1046a-qdma": for ls1046A Board
> >> > +- reg:                     Should contain the register's base
> address and length.
> >> > +- interrupts:              Should contain a reference to the
> interrupt used by
> >> this
> >> > +                   device.
> >> > +- interrupt-names: Should contain interrupt names:
> >> > +                    "qdma-error": the error interrupt
> >> > +                    "qdma-queue": the queue interrupt
> >> > +- queues:          Should contain number of queues supported.
> >>
> >> Needs a vendor prefix.
> >>
> >
> > Does that mean the 'queues' field needs a vendor prefix?
> > Like 'fsl-queues'? Right?
> 
> No, vendor prefixes end with a comma: fsl,queues
> 
> Rob

Done.
Thanks for your review; the issue will be fixed in the next version.

Best Regards,
Wen

^ permalink raw reply	[flat|nested] 32+ messages in thread

* RE: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings
@ 2018-05-24  7:20 ` Wen He
  0 siblings, 0 replies; 32+ messages in thread
From: Wen He @ 2018-05-24  7:20 UTC (permalink / raw)
  To: Rob Herring; +Cc: dmaengine, devicetree, Leo Li, Jiafei Pan, Jiaheng Fan, Vinod



> -----Original Message-----
> From: Rob Herring [mailto:robh@kernel.org]
> Sent: 2018年5月24日 3:59
> To: Wen He <wen.he_1@nxp.com>
> Cc: dmaengine@vger.kernel.org; devicetree@vger.kernel.org; Leo Li
> <leoyang.li@nxp.com>; Jiafei Pan <jiafei.pan@nxp.com>; Jiaheng Fan
> <jiaheng.fan@nxp.com>; Vinod <vkoul@kernel.org>
> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> controller bindings
> 
> Updated Vinod's email...
> 
> On Mon, May 21, 2018 at 12:52 AM, Wen He <wen.he_1@nxp.com> wrote:
> > Hi Rob,
> >
> > Please see my comments inline.
> >
> > Best Regards,
> > Wen
> >
> >> -----Original Message-----
> >> From: Rob Herring [mailto:robh@kernel.org]
> >> Sent: 2018年5月19日 5:26
> >> To: Wen He <wen.he_1@nxp.com>
> >> Cc: vinod.koul@intel.com; dmaengine@vger.kernel.org;
> >> devicetree@vger.kernel.org; Leo Li <leoyang.li@nxp.com>; Jiafei Pan
> >> <jiafei.pan@nxp.com>; Jiaheng Fan <jiaheng.fan@nxp.com>
> >> Subject: Re: [v4 3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA
> >> controller bindings
> >>
> >> On Mon, May 14, 2018 at 08:03:04PM +0800, Wen He wrote:
> >> > Document the devicetree bindings for NXP Layerscape qDMA controller
> >> > which could be found on NXP QorIQ Layerscape SoCs.
> >> >
> >> > Signed-off-by: Wen He <wen.he_1@nxp.com>
> >> > ---
> >> > change in v4:
> >> >     - Rewrite the bindings document that follows generic DMA
> >> > bindings file
> >> >
> >> > change in v3:
> >> >     - no change
> >> >
> >> > change in v2:
> >> >     - Remove indentation
> >> >     - Add "Should be" before 'fsl,ls1021a-qdma'
> >> >     - Replace 'channels' by 'dma-channels'
> >> >     - Replace 'qdma@8390000' by 'dma-controller@8390000'
> >> >
> >> >  Documentation/devicetree/bindings/dma/fsl-qdma.txt |   41
> >> ++++++++++++++++++++
> >> >  1 files changed, 41 insertions(+), 0 deletions(-)  create mode
> >> > 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> >
> >> > diff --git a/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > new file mode 100644
> >> > index 0000000..368c4e7
> >> > --- /dev/null
> >> > +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
> >> > @@ -0,0 +1,41 @@
> >> > +NXP Layerscape SoC qDMA Controller
> >> > +==================================
> >> > +
> >> > +This device follows the generic DMA bindings defined in dma/dma.txt.
> >> > +
> >> > +Required properties:
> >> > +
> >> > +- compatible:              Must be one of
> >> > +                    "fsl,ls1021a-qdma": for LS1021A Board
> >> > +                    "fsl,ls1043a-qdma": for ls1043A Board
> >> > +                    "fsl,ls1046a-qdma": for ls1046A Board
> >> > +- reg:                     Should contain the register's base
> address and length.
> >> > +- interrupts:              Should contain a reference to the
> interrupt used by
> >> this
> >> > +                   device.
> >> > +- interrupt-names: Should contain interrupt names:
> >> > +                    "qdma-error": the error interrupt
> >> > +                    "qdma-queue": the queue interrupt
> >> > +- queues:          Should contain number of queues supported.
> >>
> >> Needs a vendor prefix.
> >>
> >
> > Does that mean the 'queues' field needs a vendor prefix?
> > Like 'fsl-queues'? Right?
> 
> No, vendor prefixes end with a comma: fsl,queues
> 
> Rob

Done.
Thanks for your review; the issue will be fixed in the next version.

Best Regards,
Wen

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2018-05-24  7:20 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-14 12:03 [v4,1/6] dmaengine: fsldma: Replace DMA_IN/OUT by FSL_DMA_IN/OUT Wen He
2018-05-14 12:03 ` [v4 1/6] " Wen He
2018-05-14 12:03 [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs Wen He
2018-05-14 12:03 ` [v4 2/6] " Wen He
2018-05-14 12:03 [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings Wen He
2018-05-14 12:03 ` [v4 3/6] " Wen He
2018-05-14 12:03 [v4,4/6] arm64: dts: ls1043a: add qdma device tree nodes Wen He
2018-05-14 12:03 ` [v4 4/6] " Wen He
2018-05-14 12:03 [v4,5/6] arm64: dts: ls1046a: " Wen He
2018-05-14 12:03 ` [v4 5/6] " Wen He
2018-05-14 12:03 [v4,6/6] arm: dts: ls1021a: " Wen He
2018-05-14 12:03 ` [v4 6/6] " Wen He
2018-05-17  6:04 [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs Vinod Koul
2018-05-17  6:04 ` [v4 2/6] " Vinod
2018-05-17 11:27 [v4,2/6] " Wen He
2018-05-17 11:27 ` [v4 2/6] " Wen He
2018-05-18  4:21 [v4,2/6] " Vinod Koul
2018-05-18  4:21 ` [v4 2/6] " Vinod
2018-05-18 10:04 [v4,2/6] " Wen He
2018-05-18 10:04 ` [v4 2/6] " Wen He
2018-05-18 21:26 [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings Rob Herring
2018-05-18 21:26 ` [v4 3/6] " Rob Herring
2018-05-21  5:52 [v4,3/6] " Wen He
2018-05-21  5:52 ` [v4 3/6] " Wen He
2018-05-21  9:09 [v4,2/6] dmaengine: fsl-qdma: Add qDMA controller driver for Layerscape SoCs Vinod Koul
2018-05-21  9:09 ` [v4 2/6] " Vinod Koul
2018-05-21  9:49 [v4,2/6] " Wen He
2018-05-21  9:49 ` [v4 2/6] " Wen He
2018-05-23 19:59 [v4,3/6] dt-bindings: fsl-qdma: Add NXP Layerscpae qDMA controller bindings Rob Herring
2018-05-23 19:59 ` [v4 3/6] " Rob Herring
2018-05-24  7:20 [v4,3/6] " Wen He
2018-05-24  7:20 ` [v4 3/6] " Wen He

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.