All of lore.kernel.org
 help / color / mirror / Atom feed
* [1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
@ 2018-05-04  8:01 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:01 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul
  Cc: eric.long, broonie, baolin.wang, dmaengine, linux-kernel

From: Eric Long <eric.long@spreadtrum.com>

This is a preparation patch: it lets us use the default DMA configuration
to implement the device_prep_dma_memcpy() interface instead of calling
sprd_dma_config().

We will implement a new sprd_dma_config() function when introducing the
device_prep_slave_sg() interface in a following patch, so we can remove
the obsolete sprd_dma_config() first.

Signed-off-by: Eric Long <eric.long@spreadtrum.com>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
---
 drivers/dma/sprd-dma.c |  154 ++++++++++--------------------------------------
 1 file changed, 32 insertions(+), 122 deletions(-)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index ccdeb8f..a7a89fd 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -552,147 +552,57 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
-static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
-			   dma_addr_t dest, dma_addr_t src, size_t len)
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+			 size_t len, unsigned long flags)
 {
-	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
-	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
-	u32 datawidth, src_step, des_step, fragment_len;
-	u32 block_len, req_mode, irq_mode, transcation_len;
-	u32 fix_mode = 0, fix_en = 0;
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_desc *sdesc;
+	struct sprd_dma_chn_hw *hw;
+	enum sprd_dma_datawidth datawidth;
+	u32 step;
 
-	if (IS_ALIGNED(len, 4)) {
-		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
-		src_step = SPRD_DMA_WORD_STEP;
-		des_step = SPRD_DMA_WORD_STEP;
-	} else if (IS_ALIGNED(len, 2)) {
-		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
-		src_step = SPRD_DMA_SHORT_STEP;
-		des_step = SPRD_DMA_SHORT_STEP;
-	} else {
-		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
-		src_step = SPRD_DMA_BYTE_STEP;
-		des_step = SPRD_DMA_BYTE_STEP;
-	}
+	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+	if (!sdesc)
+		return NULL;
 
-	fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-	if (len <= SPRD_DMA_BLK_LEN_MASK) {
-		block_len = len;
-		transcation_len = 0;
-		req_mode = SPRD_DMA_BLK_REQ;
-		irq_mode = SPRD_DMA_BLK_INT;
-	} else {
-		block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-		transcation_len = len;
-		req_mode = SPRD_DMA_TRANS_REQ;
-		irq_mode = SPRD_DMA_TRANS_INT;
-	}
+	hw = &sdesc->chn_hw;
 
 	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
+	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
+	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
 	hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
 			     SPRD_DMA_HIGH_ADDR_MASK);
 	hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
 			    SPRD_DMA_HIGH_ADDR_MASK);
 
-	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
-	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
-
-	if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
-		fix_en = 0;
+	if (IS_ALIGNED(len, 8)) {
+		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
+		step = SPRD_DMA_DWORD_STEP;
+	} else if (IS_ALIGNED(len, 4)) {
+		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
+		step = SPRD_DMA_WORD_STEP;
+	} else if (IS_ALIGNED(len, 2)) {
+		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
+		step = SPRD_DMA_SHORT_STEP;
 	} else {
-		fix_en = 1;
-		if (src_step)
-			fix_mode = 1;
-		else
-			fix_mode = 0;
+		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
+		step = SPRD_DMA_BYTE_STEP;
 	}
 
 	hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
 		datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
-		req_mode << SPRD_DMA_REQ_MODE_OFFSET |
-		fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
-		fix_en << SPRD_DMA_FIX_EN_OFFSET |
-		(fragment_len & SPRD_DMA_FRG_LEN_MASK);
-	hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
-
-	hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
-
-	switch (irq_mode) {
-	case SPRD_DMA_NO_INT:
-		break;
-
-	case SPRD_DMA_FRAG_INT:
-		hw->intc |= SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_BLK_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN;
-		break;
+		SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET |
+		(len & SPRD_DMA_FRG_LEN_MASK);
+	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-	case SPRD_DMA_BLK_FRAG_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_FRAG_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_BLK_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
-		break;
-
-	case SPRD_DMA_LIST_INT:
-		hw->intc |= SPRD_DMA_LIST_INT_EN;
-		break;
-
-	case SPRD_DMA_CFGERR_INT:
-		hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
-		break;
-
-	default:
-		dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
-		return -EINVAL;
-	}
-
-	if (transcation_len == 0)
-		hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
-	else
-		hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
-
-	hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
+	hw->trsf_step = (step & SPRD_DMA_TRSF_STEP_MASK) <<
 			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
-			(src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			(step & SPRD_DMA_TRSF_STEP_MASK) <<
 			SPRD_DMA_SRC_TRSF_STEP_OFFSET;
 
-	hw->frg_step = 0;
-	hw->src_blk_step = 0;
-	hw->des_blk_step = 0;
-	hw->src_blk_step = 0;
-	return 0;
-}
-
-static struct dma_async_tx_descriptor *
-sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-			 size_t len, unsigned long flags)
-{
-	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
-	struct sprd_dma_desc *sdesc;
-	int ret;
-
-	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
-	if (!sdesc)
-		return NULL;
-
-	ret = sprd_dma_config(chan, sdesc, dest, src, len);
-	if (ret) {
-		kfree(sdesc);
-		return NULL;
-	}
-
 	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
@ 2018-05-04  8:01 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:01 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul
  Cc: eric.long, broonie, baolin.wang, dmaengine, linux-kernel

From: Eric Long <eric.long@spreadtrum.com>

This is a preparation patch: it lets us use the default DMA configuration
to implement the device_prep_dma_memcpy() interface instead of calling
sprd_dma_config().

We will implement a new sprd_dma_config() function when introducing the
device_prep_slave_sg() interface in a following patch, so we can remove
the obsolete sprd_dma_config() first.

Signed-off-by: Eric Long <eric.long@spreadtrum.com>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
---
 drivers/dma/sprd-dma.c |  154 ++++++++++--------------------------------------
 1 file changed, 32 insertions(+), 122 deletions(-)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index ccdeb8f..a7a89fd 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -552,147 +552,57 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
-static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
-			   dma_addr_t dest, dma_addr_t src, size_t len)
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+			 size_t len, unsigned long flags)
 {
-	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
-	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
-	u32 datawidth, src_step, des_step, fragment_len;
-	u32 block_len, req_mode, irq_mode, transcation_len;
-	u32 fix_mode = 0, fix_en = 0;
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_desc *sdesc;
+	struct sprd_dma_chn_hw *hw;
+	enum sprd_dma_datawidth datawidth;
+	u32 step;
 
-	if (IS_ALIGNED(len, 4)) {
-		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
-		src_step = SPRD_DMA_WORD_STEP;
-		des_step = SPRD_DMA_WORD_STEP;
-	} else if (IS_ALIGNED(len, 2)) {
-		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
-		src_step = SPRD_DMA_SHORT_STEP;
-		des_step = SPRD_DMA_SHORT_STEP;
-	} else {
-		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
-		src_step = SPRD_DMA_BYTE_STEP;
-		des_step = SPRD_DMA_BYTE_STEP;
-	}
+	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+	if (!sdesc)
+		return NULL;
 
-	fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-	if (len <= SPRD_DMA_BLK_LEN_MASK) {
-		block_len = len;
-		transcation_len = 0;
-		req_mode = SPRD_DMA_BLK_REQ;
-		irq_mode = SPRD_DMA_BLK_INT;
-	} else {
-		block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-		transcation_len = len;
-		req_mode = SPRD_DMA_TRANS_REQ;
-		irq_mode = SPRD_DMA_TRANS_INT;
-	}
+	hw = &sdesc->chn_hw;
 
 	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
+	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
+	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
 	hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
 			     SPRD_DMA_HIGH_ADDR_MASK);
 	hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
 			    SPRD_DMA_HIGH_ADDR_MASK);
 
-	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
-	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
-
-	if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
-		fix_en = 0;
+	if (IS_ALIGNED(len, 8)) {
+		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
+		step = SPRD_DMA_DWORD_STEP;
+	} else if (IS_ALIGNED(len, 4)) {
+		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
+		step = SPRD_DMA_WORD_STEP;
+	} else if (IS_ALIGNED(len, 2)) {
+		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
+		step = SPRD_DMA_SHORT_STEP;
 	} else {
-		fix_en = 1;
-		if (src_step)
-			fix_mode = 1;
-		else
-			fix_mode = 0;
+		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
+		step = SPRD_DMA_BYTE_STEP;
 	}
 
 	hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
 		datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
-		req_mode << SPRD_DMA_REQ_MODE_OFFSET |
-		fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
-		fix_en << SPRD_DMA_FIX_EN_OFFSET |
-		(fragment_len & SPRD_DMA_FRG_LEN_MASK);
-	hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
-
-	hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
-
-	switch (irq_mode) {
-	case SPRD_DMA_NO_INT:
-		break;
-
-	case SPRD_DMA_FRAG_INT:
-		hw->intc |= SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_BLK_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN;
-		break;
+		SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET |
+		(len & SPRD_DMA_FRG_LEN_MASK);
+	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-	case SPRD_DMA_BLK_FRAG_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_FRAG_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
-
-	case SPRD_DMA_TRANS_BLK_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
-		break;
-
-	case SPRD_DMA_LIST_INT:
-		hw->intc |= SPRD_DMA_LIST_INT_EN;
-		break;
-
-	case SPRD_DMA_CFGERR_INT:
-		hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
-		break;
-
-	default:
-		dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
-		return -EINVAL;
-	}
-
-	if (transcation_len == 0)
-		hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
-	else
-		hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
-
-	hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
+	hw->trsf_step = (step & SPRD_DMA_TRSF_STEP_MASK) <<
 			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
-			(src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			(step & SPRD_DMA_TRSF_STEP_MASK) <<
 			SPRD_DMA_SRC_TRSF_STEP_OFFSET;
 
-	hw->frg_step = 0;
-	hw->src_blk_step = 0;
-	hw->des_blk_step = 0;
-	hw->src_blk_step = 0;
-	return 0;
-}
-
-static struct dma_async_tx_descriptor *
-sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-			 size_t len, unsigned long flags)
-{
-	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
-	struct sprd_dma_desc *sdesc;
-	int ret;
-
-	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
-	if (!sdesc)
-		return NULL;
-
-	ret = sprd_dma_config(chan, sdesc, dest, src, len);
-	if (ret) {
-		kfree(sdesc);
-		return NULL;
-	}
-
 	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
  2018-05-04  8:01 ` [PATCH 1/2] " Baolin Wang
@ 2018-05-04  8:01 ` Baolin Wang
  -1 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:01 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul
  Cc: eric.long, broonie, baolin.wang, dmaengine, linux-kernel

From: Eric Long <eric.long@spreadtrum.com>

This patch adds the 'device_config' and 'device_prep_slave_sg' interfaces
for users to configure DMA, and also adds a 'struct sprd_dma_config'
structure to save the Spreadtrum DMA configuration for each DMA channel.

Signed-off-by: Eric Long <eric.long@spreadtrum.com>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
---
 drivers/dma/sprd-dma.c       |  215 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma/sprd-dma.h |    4 +
 2 files changed, 219 insertions(+)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index a7a89fd..d7c7ffa 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -100,6 +100,8 @@
 #define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
 #define SPRD_DMA_SWT_MODE_OFFSET	26
 #define SPRD_DMA_REQ_MODE_OFFSET	24
+#define SPRD_DMA_WRAP_SEL_OFFSET	23
+#define SPRD_DMA_WRAP_EN_OFFSET		22
 #define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
 #define SPRD_DMA_FIX_SEL_OFFSET		21
 #define SPRD_DMA_FIX_EN_OFFSET		20
@@ -154,6 +156,35 @@ struct sprd_dma_chn_hw {
 	u32 des_blk_step;
 };
 
+/*
+ * struct sprd_dma_config - DMA configuration structure
+ * @cfg: dma slave channel runtime config
+ * @src_addr: the source physical address
+ * @dst_addr: the destination physical address
+ * @block_len: specify one block transfer length
+ * @transcation_len: specify one transcation transfer length
+ * @src_step: source transfer step
+ * @dst_step: destination transfer step
+ * @wrap_ptr: wrap pointer address, once the transfer address reaches the
+ * 'wrap_ptr', the next transfer address will jump to the 'wrap_to' address.
+ * @wrap_to: wrap jump to address
+ * @req_mode: specify the DMA request mode
+ * @int_mode: specify the DMA interrupt type
+ */
+struct sprd_dma_config {
+	struct dma_slave_config cfg;
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	u32 block_len;
+	u32 transcation_len;
+	u32 src_step;
+	u32 dst_step;
+	phys_addr_t wrap_ptr;
+	phys_addr_t wrap_to;
+	enum sprd_dma_req_mode req_mode;
+	enum sprd_dma_int_type int_mode;
+};
+
 /* dma request description */
 struct sprd_dma_desc {
 	struct virt_dma_desc	vd;
@@ -164,6 +195,7 @@ struct sprd_dma_desc {
 struct sprd_dma_chn {
 	struct virt_dma_chan	vc;
 	void __iomem		*chn_base;
+	struct sprd_dma_config	slave_cfg;
 	u32			chn_num;
 	u32			dev_id;
 	struct sprd_dma_desc	*cur_desc;
@@ -552,6 +584,121 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
+static enum sprd_dma_datawidth
+sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
+{
+	switch (buswidth) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return SPRD_DMA_DATAWIDTH_1_BYTE;
+
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return SPRD_DMA_DATAWIDTH_2_BYTES;
+
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return SPRD_DMA_DATAWIDTH_4_BYTES;
+
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		return SPRD_DMA_DATAWIDTH_8_BYTES;
+
+	default:
+		return SPRD_DMA_DATAWIDTH_4_BYTES;
+	}
+}
+
+static u32 sprd_dma_get_step(enum sprd_dma_datawidth datawidth)
+{
+	switch (datawidth) {
+	case SPRD_DMA_DATAWIDTH_1_BYTE:
+		return SPRD_DMA_BYTE_STEP;
+
+	case SPRD_DMA_DATAWIDTH_2_BYTES:
+		return SPRD_DMA_SHORT_STEP;
+
+	case SPRD_DMA_DATAWIDTH_4_BYTES:
+		return SPRD_DMA_WORD_STEP;
+
+	case SPRD_DMA_DATAWIDTH_8_BYTES:
+		return SPRD_DMA_DWORD_STEP;
+
+	default:
+		return SPRD_DMA_DWORD_STEP;
+	}
+}
+
+static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
+			   struct sprd_dma_config *slave_cfg)
+{
+	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+	u32 fix_mode = 0, fix_en = 0, wrap_en = 0, wrap_mode = 0;
+	u32 src_datawidth, dst_datawidth;
+
+	if (slave_cfg->cfg.slave_id)
+		schan->dev_id = slave_cfg->cfg.slave_id;
+
+	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+	hw->wrap_ptr = (u32)((slave_cfg->wrap_ptr & SPRD_DMA_LOW_ADDR_MASK) |
+		((slave_cfg->src_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		 SPRD_DMA_HIGH_ADDR_MASK));
+	hw->wrap_to = (u32)((slave_cfg->wrap_to & SPRD_DMA_LOW_ADDR_MASK) |
+		((slave_cfg->dst_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		 SPRD_DMA_HIGH_ADDR_MASK));
+
+	hw->src_addr = (u32)(slave_cfg->src_addr & SPRD_DMA_LOW_ADDR_MASK);
+	hw->des_addr = (u32)(slave_cfg->dst_addr & SPRD_DMA_LOW_ADDR_MASK);
+
+	if ((slave_cfg->src_step != 0 && slave_cfg->dst_step != 0)
+	    || (slave_cfg->src_step | slave_cfg->dst_step) == 0) {
+		fix_en = 0;
+	} else {
+		fix_en = 1;
+		if (slave_cfg->src_step)
+			fix_mode = 1;
+		else
+			fix_mode = 0;
+	}
+
+	if (slave_cfg->wrap_ptr && slave_cfg->wrap_to) {
+		wrap_en = 1;
+		if (slave_cfg->wrap_to == slave_cfg->src_addr) {
+			wrap_mode = 0;
+		} else if (slave_cfg->wrap_to == slave_cfg->dst_addr) {
+			wrap_mode = 1;
+		} else {
+			dev_err(sdev->dma_dev.dev, "invalid wrap mode\n");
+			return -EINVAL;
+		}
+	}
+
+	hw->intc = slave_cfg->int_mode | SPRD_DMA_CFG_ERR_INT_EN;
+
+	src_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.src_addr_width);
+	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.dst_addr_width);
+	hw->frg_len = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
+		dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
+		slave_cfg->req_mode << SPRD_DMA_REQ_MODE_OFFSET |
+		wrap_mode << SPRD_DMA_WRAP_SEL_OFFSET |
+		wrap_en << SPRD_DMA_WRAP_EN_OFFSET |
+		fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
+		fix_en << SPRD_DMA_FIX_EN_OFFSET |
+		(slave_cfg->cfg.src_maxburst & SPRD_DMA_FRG_LEN_MASK);
+
+	hw->blk_len = slave_cfg->block_len & SPRD_DMA_BLK_LEN_MASK;
+
+	hw->trsc_len = slave_cfg->transcation_len & SPRD_DMA_TRSC_LEN_MASK;
+
+	hw->trsf_step = (slave_cfg->dst_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
+			(slave_cfg->src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+
+	hw->frg_step = 0;
+	hw->src_blk_step = 0;
+	hw->des_blk_step = 0;
+	return 0;
+}
+
 static struct dma_async_tx_descriptor *
 sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 			 size_t len, unsigned long flags)
@@ -606,6 +753,72 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		       unsigned int sglen, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
+	struct sprd_dma_desc *sdesc;
+	struct scatterlist *sg;
+	int ret, i;
+
+	/* TODO: now we only support one sg for each DMA configuration. */
+	if (!is_slave_direction(dir) || sglen > 1)
+		return NULL;
+
+	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+	if (!sdesc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		if (dir == DMA_MEM_TO_DEV) {
+			slave_cfg->src_addr = sg_dma_address(sg);
+			slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
+			slave_cfg->src_step =
+			sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
+			slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
+		} else {
+			slave_cfg->src_addr = slave_cfg->cfg.src_addr;
+			slave_cfg->dst_addr = sg_dma_address(sg);
+			slave_cfg->src_step = SPRD_DMA_NONE_STEP;
+			slave_cfg->dst_step =
+			sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
+		}
+
+		slave_cfg->block_len = sg_dma_len(sg);
+		slave_cfg->transcation_len = sg_dma_len(sg);
+	}
+
+	slave_cfg->req_mode =
+		(flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
+	slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
+
+	ret = sprd_dma_config(chan, sdesc, slave_cfg);
+	if (ret) {
+		kfree(sdesc);
+		return NULL;
+	}
+
+	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
+}
+
+static int sprd_dma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *config)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
+
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memset(slave_cfg, 0, sizeof(*slave_cfg));
+	memcpy(&slave_cfg->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
 static int sprd_dma_pause(struct dma_chan *chan)
 {
 	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
@@ -733,6 +946,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
 	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
 	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
 	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
+	sdev->dma_dev.device_config = sprd_dma_slave_config;
 	sdev->dma_dev.device_pause = sprd_dma_pause;
 	sdev->dma_dev.device_resume = sprd_dma_resume;
 	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index c545162..b0115e3 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -3,6 +3,10 @@
 #ifndef _SPRD_DMA_H_
 #define _SPRD_DMA_H_
 
+#define SPRD_DMA_REQ_SHIFT 16
+#define SPRD_DMA_FLAGS(req_mode, int_type) \
+	((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
 /*
  * enum sprd_dma_req_mode: define the DMA request mode
  * @SPRD_DMA_FRAG_REQ: fragment request mode

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
@ 2018-05-04  8:01 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:01 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul
  Cc: eric.long, broonie, baolin.wang, dmaengine, linux-kernel

From: Eric Long <eric.long@spreadtrum.com>

This patch adds the 'device_config' and 'device_prep_slave_sg' interfaces
for users to configure DMA, and also adds a 'struct sprd_dma_config'
structure to save the Spreadtrum DMA configuration for each DMA channel.

Signed-off-by: Eric Long <eric.long@spreadtrum.com>
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
---
 drivers/dma/sprd-dma.c       |  215 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma/sprd-dma.h |    4 +
 2 files changed, 219 insertions(+)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index a7a89fd..d7c7ffa 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -100,6 +100,8 @@
 #define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
 #define SPRD_DMA_SWT_MODE_OFFSET	26
 #define SPRD_DMA_REQ_MODE_OFFSET	24
+#define SPRD_DMA_WRAP_SEL_OFFSET	23
+#define SPRD_DMA_WRAP_EN_OFFSET		22
 #define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
 #define SPRD_DMA_FIX_SEL_OFFSET		21
 #define SPRD_DMA_FIX_EN_OFFSET		20
@@ -154,6 +156,35 @@ struct sprd_dma_chn_hw {
 	u32 des_blk_step;
 };
 
+/*
+ * struct sprd_dma_config - DMA configuration structure
+ * @cfg: dma slave channel runtime config
+ * @src_addr: the source physical address
+ * @dst_addr: the destination physical address
+ * @block_len: specify one block transfer length
+ * @transcation_len: specify one transcation transfer length
+ * @src_step: source transfer step
+ * @dst_step: destination transfer step
+ * @wrap_ptr: wrap pointer address, once the transfer address reaches the
+ * 'wrap_ptr', the next transfer address will jump to the 'wrap_to' address.
+ * @wrap_to: wrap jump to address
+ * @req_mode: specify the DMA request mode
+ * @int_mode: specify the DMA interrupt type
+ */
+struct sprd_dma_config {
+	struct dma_slave_config cfg;
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	u32 block_len;
+	u32 transcation_len;
+	u32 src_step;
+	u32 dst_step;
+	phys_addr_t wrap_ptr;
+	phys_addr_t wrap_to;
+	enum sprd_dma_req_mode req_mode;
+	enum sprd_dma_int_type int_mode;
+};
+
 /* dma request description */
 struct sprd_dma_desc {
 	struct virt_dma_desc	vd;
@@ -164,6 +195,7 @@ struct sprd_dma_desc {
 struct sprd_dma_chn {
 	struct virt_dma_chan	vc;
 	void __iomem		*chn_base;
+	struct sprd_dma_config	slave_cfg;
 	u32			chn_num;
 	u32			dev_id;
 	struct sprd_dma_desc	*cur_desc;
@@ -552,6 +584,121 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
+static enum sprd_dma_datawidth
+sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
+{
+	switch (buswidth) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		return SPRD_DMA_DATAWIDTH_1_BYTE;
+
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return SPRD_DMA_DATAWIDTH_2_BYTES;
+
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return SPRD_DMA_DATAWIDTH_4_BYTES;
+
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		return SPRD_DMA_DATAWIDTH_8_BYTES;
+
+	default:
+		return SPRD_DMA_DATAWIDTH_4_BYTES;
+	}
+}
+
+static u32 sprd_dma_get_step(enum sprd_dma_datawidth datawidth)
+{
+	switch (datawidth) {
+	case SPRD_DMA_DATAWIDTH_1_BYTE:
+		return SPRD_DMA_BYTE_STEP;
+
+	case SPRD_DMA_DATAWIDTH_2_BYTES:
+		return SPRD_DMA_SHORT_STEP;
+
+	case SPRD_DMA_DATAWIDTH_4_BYTES:
+		return SPRD_DMA_WORD_STEP;
+
+	case SPRD_DMA_DATAWIDTH_8_BYTES:
+		return SPRD_DMA_DWORD_STEP;
+
+	default:
+		return SPRD_DMA_DWORD_STEP;
+	}
+}
+
+static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
+			   struct sprd_dma_config *slave_cfg)
+{
+	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+	u32 fix_mode = 0, fix_en = 0, wrap_en = 0, wrap_mode = 0;
+	u32 src_datawidth, dst_datawidth;
+
+	if (slave_cfg->cfg.slave_id)
+		schan->dev_id = slave_cfg->cfg.slave_id;
+
+	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+	hw->wrap_ptr = (u32)((slave_cfg->wrap_ptr & SPRD_DMA_LOW_ADDR_MASK) |
+		((slave_cfg->src_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		 SPRD_DMA_HIGH_ADDR_MASK));
+	hw->wrap_to = (u32)((slave_cfg->wrap_to & SPRD_DMA_LOW_ADDR_MASK) |
+		((slave_cfg->dst_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		 SPRD_DMA_HIGH_ADDR_MASK));
+
+	hw->src_addr = (u32)(slave_cfg->src_addr & SPRD_DMA_LOW_ADDR_MASK);
+	hw->des_addr = (u32)(slave_cfg->dst_addr & SPRD_DMA_LOW_ADDR_MASK);
+
+	if ((slave_cfg->src_step != 0 && slave_cfg->dst_step != 0)
+	    || (slave_cfg->src_step | slave_cfg->dst_step) == 0) {
+		fix_en = 0;
+	} else {
+		fix_en = 1;
+		if (slave_cfg->src_step)
+			fix_mode = 1;
+		else
+			fix_mode = 0;
+	}
+
+	if (slave_cfg->wrap_ptr && slave_cfg->wrap_to) {
+		wrap_en = 1;
+		if (slave_cfg->wrap_to == slave_cfg->src_addr) {
+			wrap_mode = 0;
+		} else if (slave_cfg->wrap_to == slave_cfg->dst_addr) {
+			wrap_mode = 1;
+		} else {
+			dev_err(sdev->dma_dev.dev, "invalid wrap mode\n");
+			return -EINVAL;
+		}
+	}
+
+	hw->intc = slave_cfg->int_mode | SPRD_DMA_CFG_ERR_INT_EN;
+
+	src_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.src_addr_width);
+	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.dst_addr_width);
+	hw->frg_len = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
+		dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
+		slave_cfg->req_mode << SPRD_DMA_REQ_MODE_OFFSET |
+		wrap_mode << SPRD_DMA_WRAP_SEL_OFFSET |
+		wrap_en << SPRD_DMA_WRAP_EN_OFFSET |
+		fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
+		fix_en << SPRD_DMA_FIX_EN_OFFSET |
+		(slave_cfg->cfg.src_maxburst & SPRD_DMA_FRG_LEN_MASK);
+
+	hw->blk_len = slave_cfg->block_len & SPRD_DMA_BLK_LEN_MASK;
+
+	hw->trsc_len = slave_cfg->transcation_len & SPRD_DMA_TRSC_LEN_MASK;
+
+	hw->trsf_step = (slave_cfg->dst_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
+			(slave_cfg->src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+			SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+
+	hw->frg_step = 0;
+	hw->src_blk_step = 0;
+	hw->des_blk_step = 0;
+	return 0;
+}
+
 static struct dma_async_tx_descriptor *
 sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 			 size_t len, unsigned long flags)
@@ -606,6 +753,72 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		       unsigned int sglen, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
+	struct sprd_dma_desc *sdesc;
+	struct scatterlist *sg;
+	int ret, i;
+
+	/* TODO: now we only support one sg for each DMA configuration. */
+	if (!is_slave_direction(dir) || sglen > 1)
+		return NULL;
+
+	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+	if (!sdesc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		if (dir == DMA_MEM_TO_DEV) {
+			slave_cfg->src_addr = sg_dma_address(sg);
+			slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
+			slave_cfg->src_step =
+			sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
+			slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
+		} else {
+			slave_cfg->src_addr = slave_cfg->cfg.src_addr;
+			slave_cfg->dst_addr = sg_dma_address(sg);
+			slave_cfg->src_step = SPRD_DMA_NONE_STEP;
+			slave_cfg->dst_step =
+			sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
+		}
+
+		slave_cfg->block_len = sg_dma_len(sg);
+		slave_cfg->transcation_len = sg_dma_len(sg);
+	}
+
+	slave_cfg->req_mode =
+		(flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
+	slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
+
+	ret = sprd_dma_config(chan, sdesc, slave_cfg);
+	if (ret) {
+		kfree(sdesc);
+		return NULL;
+	}
+
+	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
+}
+
+static int sprd_dma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *config)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
+
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memset(slave_cfg, 0, sizeof(*slave_cfg));
+	memcpy(&slave_cfg->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
 static int sprd_dma_pause(struct dma_chan *chan)
 {
 	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
@@ -733,6 +946,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
 	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
 	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
 	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
+	sdev->dma_dev.device_config = sprd_dma_slave_config;
 	sdev->dma_dev.device_pause = sprd_dma_pause;
 	sdev->dma_dev.device_resume = sprd_dma_resume;
 	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index c545162..b0115e3 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -3,6 +3,10 @@
 #ifndef _SPRD_DMA_H_
 #define _SPRD_DMA_H_
 
+#define SPRD_DMA_REQ_SHIFT 16
+#define SPRD_DMA_FLAGS(req_mode, int_type) \
+	((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
 /*
  * enum sprd_dma_req_mode: define the DMA request mode
  * @SPRD_DMA_FRAG_REQ: fragment request mode
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
  2018-05-04  8:01 ` [PATCH 1/2] " Baolin Wang
@ 2018-05-04  8:06 ` Baolin Wang
  -1 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:06 UTC (permalink / raw)
  To: Dan Williams, Vinod Koul, vkoul
  Cc: Eric Long, Mark Brown, Baolin Wang, dmaengine, LKML

Sorry, adding Vinod's new email.

On 4 May 2018 at 16:01, Baolin Wang <baolin.wang@linaro.org> wrote:
> From: Eric Long <eric.long@spreadtrum.com>
>
> This is one preparation patch; we can use the default DMA configuration to
> implement the device_prep_dma_memcpy() interface instead of issuing
> sprd_dma_config().
>
> We will implement a new sprd_dma_config() function when introducing the
> device_prep_slave_sg() interface in a following patch, so we can remove
> the obsolete sprd_dma_config() first.
>
> Signed-off-by: Eric Long <eric.long@spreadtrum.com>
> Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
> ---
>  drivers/dma/sprd-dma.c |  154 ++++++++++--------------------------------------
>  1 file changed, 32 insertions(+), 122 deletions(-)
>
> diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
> index ccdeb8f..a7a89fd 100644
> --- a/drivers/dma/sprd-dma.c
> +++ b/drivers/dma/sprd-dma.c
> @@ -552,147 +552,57 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         spin_unlock_irqrestore(&schan->vc.lock, flags);
>  }
>
> -static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
> -                          dma_addr_t dest, dma_addr_t src, size_t len)
> +static struct dma_async_tx_descriptor *
> +sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
> +                        size_t len, unsigned long flags)
>  {
> -       struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
> -       struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
> -       u32 datawidth, src_step, des_step, fragment_len;
> -       u32 block_len, req_mode, irq_mode, transcation_len;
> -       u32 fix_mode = 0, fix_en = 0;
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_desc *sdesc;
> +       struct sprd_dma_chn_hw *hw;
> +       enum sprd_dma_datawidth datawidth;
> +       u32 step;
>
> -       if (IS_ALIGNED(len, 4)) {
> -               datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
> -               src_step = SPRD_DMA_WORD_STEP;
> -               des_step = SPRD_DMA_WORD_STEP;
> -       } else if (IS_ALIGNED(len, 2)) {
> -               datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
> -               src_step = SPRD_DMA_SHORT_STEP;
> -               des_step = SPRD_DMA_SHORT_STEP;
> -       } else {
> -               datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
> -               src_step = SPRD_DMA_BYTE_STEP;
> -               des_step = SPRD_DMA_BYTE_STEP;
> -       }
> +       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> +       if (!sdesc)
> +               return NULL;
>
> -       fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
> -       if (len <= SPRD_DMA_BLK_LEN_MASK) {
> -               block_len = len;
> -               transcation_len = 0;
> -               req_mode = SPRD_DMA_BLK_REQ;
> -               irq_mode = SPRD_DMA_BLK_INT;
> -       } else {
> -               block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
> -               transcation_len = len;
> -               req_mode = SPRD_DMA_TRANS_REQ;
> -               irq_mode = SPRD_DMA_TRANS_INT;
> -       }
> +       hw = &sdesc->chn_hw;
>
>         hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
> +       hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
> +       hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
> +       hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
>         hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
>                              SPRD_DMA_HIGH_ADDR_MASK);
>         hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
>                             SPRD_DMA_HIGH_ADDR_MASK);
>
> -       hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
> -       hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
> -
> -       if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
> -               fix_en = 0;
> +       if (IS_ALIGNED(len, 8)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
> +               step = SPRD_DMA_DWORD_STEP;
> +       } else if (IS_ALIGNED(len, 4)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
> +               step = SPRD_DMA_WORD_STEP;
> +       } else if (IS_ALIGNED(len, 2)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
> +               step = SPRD_DMA_SHORT_STEP;
>         } else {
> -               fix_en = 1;
> -               if (src_step)
> -                       fix_mode = 1;
> -               else
> -                       fix_mode = 0;
> +               datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
> +               step = SPRD_DMA_BYTE_STEP;
>         }
>
>         hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
>                 datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
> -               req_mode << SPRD_DMA_REQ_MODE_OFFSET |
> -               fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
> -               fix_en << SPRD_DMA_FIX_EN_OFFSET |
> -               (fragment_len & SPRD_DMA_FRG_LEN_MASK);
> -       hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
> -
> -       hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
> -
> -       switch (irq_mode) {
> -       case SPRD_DMA_NO_INT:
> -               break;
> -
> -       case SPRD_DMA_FRAG_INT:
> -               hw->intc |= SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_BLK_INT:
> -               hw->intc |= SPRD_DMA_BLK_INT_EN;
> -               break;
> +               SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET |
> +               (len & SPRD_DMA_FRG_LEN_MASK);
> +       hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
> +       hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
>
> -       case SPRD_DMA_BLK_FRAG_INT:
> -               hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_FRAG_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_BLK_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_LIST_INT:
> -               hw->intc |= SPRD_DMA_LIST_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_CFGERR_INT:
> -               hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
> -               break;
> -
> -       default:
> -               dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
> -               return -EINVAL;
> -       }
> -
> -       if (transcation_len == 0)
> -               hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
> -       else
> -               hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
> -
> -       hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +       hw->trsf_step = (step & SPRD_DMA_TRSF_STEP_MASK) <<
>                         SPRD_DMA_DEST_TRSF_STEP_OFFSET |
> -                       (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       (step & SPRD_DMA_TRSF_STEP_MASK) <<
>                         SPRD_DMA_SRC_TRSF_STEP_OFFSET;
>
> -       hw->frg_step = 0;
> -       hw->src_blk_step = 0;
> -       hw->des_blk_step = 0;
> -       hw->src_blk_step = 0;
> -       return 0;
> -}
> -
> -static struct dma_async_tx_descriptor *
> -sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
> -                        size_t len, unsigned long flags)
> -{
> -       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> -       struct sprd_dma_desc *sdesc;
> -       int ret;
> -
> -       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> -       if (!sdesc)
> -               return NULL;
> -
> -       ret = sprd_dma_config(chan, sdesc, dest, src, len);
> -       if (ret) {
> -               kfree(sdesc);
> -               return NULL;
> -       }
> -
>         return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
>  }
>
> --
> 1.7.9.5
>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
@ 2018-05-04  8:06 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:06 UTC (permalink / raw)
  To: Dan Williams, Vinod Koul, vkoul
  Cc: Eric Long, Mark Brown, Baolin Wang, dmaengine, LKML

Sorry, adding Vinod's new email.

On 4 May 2018 at 16:01, Baolin Wang <baolin.wang@linaro.org> wrote:
> From: Eric Long <eric.long@spreadtrum.com>
>
> This is one preparation patch; we can use the default DMA configuration to
> implement the device_prep_dma_memcpy() interface instead of issuing
> sprd_dma_config().
>
> We will implement a new sprd_dma_config() function when introducing the
> device_prep_slave_sg() interface in a following patch, so we can remove
> the obsolete sprd_dma_config() first.
>
> Signed-off-by: Eric Long <eric.long@spreadtrum.com>
> Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
> ---
>  drivers/dma/sprd-dma.c |  154 ++++++++++--------------------------------------
>  1 file changed, 32 insertions(+), 122 deletions(-)
>
> diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
> index ccdeb8f..a7a89fd 100644
> --- a/drivers/dma/sprd-dma.c
> +++ b/drivers/dma/sprd-dma.c
> @@ -552,147 +552,57 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         spin_unlock_irqrestore(&schan->vc.lock, flags);
>  }
>
> -static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
> -                          dma_addr_t dest, dma_addr_t src, size_t len)
> +static struct dma_async_tx_descriptor *
> +sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
> +                        size_t len, unsigned long flags)
>  {
> -       struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
> -       struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
> -       u32 datawidth, src_step, des_step, fragment_len;
> -       u32 block_len, req_mode, irq_mode, transcation_len;
> -       u32 fix_mode = 0, fix_en = 0;
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_desc *sdesc;
> +       struct sprd_dma_chn_hw *hw;
> +       enum sprd_dma_datawidth datawidth;
> +       u32 step;
>
> -       if (IS_ALIGNED(len, 4)) {
> -               datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
> -               src_step = SPRD_DMA_WORD_STEP;
> -               des_step = SPRD_DMA_WORD_STEP;
> -       } else if (IS_ALIGNED(len, 2)) {
> -               datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
> -               src_step = SPRD_DMA_SHORT_STEP;
> -               des_step = SPRD_DMA_SHORT_STEP;
> -       } else {
> -               datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
> -               src_step = SPRD_DMA_BYTE_STEP;
> -               des_step = SPRD_DMA_BYTE_STEP;
> -       }
> +       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> +       if (!sdesc)
> +               return NULL;
>
> -       fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
> -       if (len <= SPRD_DMA_BLK_LEN_MASK) {
> -               block_len = len;
> -               transcation_len = 0;
> -               req_mode = SPRD_DMA_BLK_REQ;
> -               irq_mode = SPRD_DMA_BLK_INT;
> -       } else {
> -               block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
> -               transcation_len = len;
> -               req_mode = SPRD_DMA_TRANS_REQ;
> -               irq_mode = SPRD_DMA_TRANS_INT;
> -       }
> +       hw = &sdesc->chn_hw;
>
>         hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
> +       hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
> +       hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
> +       hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
>         hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
>                              SPRD_DMA_HIGH_ADDR_MASK);
>         hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
>                             SPRD_DMA_HIGH_ADDR_MASK);
>
> -       hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
> -       hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
> -
> -       if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
> -               fix_en = 0;
> +       if (IS_ALIGNED(len, 8)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
> +               step = SPRD_DMA_DWORD_STEP;
> +       } else if (IS_ALIGNED(len, 4)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
> +               step = SPRD_DMA_WORD_STEP;
> +       } else if (IS_ALIGNED(len, 2)) {
> +               datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
> +               step = SPRD_DMA_SHORT_STEP;
>         } else {
> -               fix_en = 1;
> -               if (src_step)
> -                       fix_mode = 1;
> -               else
> -                       fix_mode = 0;
> +               datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
> +               step = SPRD_DMA_BYTE_STEP;
>         }
>
>         hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
>                 datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
> -               req_mode << SPRD_DMA_REQ_MODE_OFFSET |
> -               fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
> -               fix_en << SPRD_DMA_FIX_EN_OFFSET |
> -               (fragment_len & SPRD_DMA_FRG_LEN_MASK);
> -       hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
> -
> -       hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
> -
> -       switch (irq_mode) {
> -       case SPRD_DMA_NO_INT:
> -               break;
> -
> -       case SPRD_DMA_FRAG_INT:
> -               hw->intc |= SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_BLK_INT:
> -               hw->intc |= SPRD_DMA_BLK_INT_EN;
> -               break;
> +               SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET |
> +               (len & SPRD_DMA_FRG_LEN_MASK);
> +       hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
> +       hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
>
> -       case SPRD_DMA_BLK_FRAG_INT:
> -               hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_FRAG_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_TRANS_BLK_INT:
> -               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_LIST_INT:
> -               hw->intc |= SPRD_DMA_LIST_INT_EN;
> -               break;
> -
> -       case SPRD_DMA_CFGERR_INT:
> -               hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
> -               break;
> -
> -       default:
> -               dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
> -               return -EINVAL;
> -       }
> -
> -       if (transcation_len == 0)
> -               hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
> -       else
> -               hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
> -
> -       hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +       hw->trsf_step = (step & SPRD_DMA_TRSF_STEP_MASK) <<
>                         SPRD_DMA_DEST_TRSF_STEP_OFFSET |
> -                       (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       (step & SPRD_DMA_TRSF_STEP_MASK) <<
>                         SPRD_DMA_SRC_TRSF_STEP_OFFSET;
>
> -       hw->frg_step = 0;
> -       hw->src_blk_step = 0;
> -       hw->des_blk_step = 0;
> -       hw->src_blk_step = 0;
> -       return 0;
> -}
> -
> -static struct dma_async_tx_descriptor *
> -sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
> -                        size_t len, unsigned long flags)
> -{
> -       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> -       struct sprd_dma_desc *sdesc;
> -       int ret;
> -
> -       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> -       if (!sdesc)
> -               return NULL;
> -
> -       ret = sprd_dma_config(chan, sdesc, dest, src, len);
> -       if (ret) {
> -               kfree(sdesc);
> -               return NULL;
> -       }
> -
>         return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
>  }
>
> --
> 1.7.9.5
>



-- 
Baolin.wang
Best Regards

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
  2018-05-04  8:01 ` [PATCH 2/2] " Baolin Wang
@ 2018-05-04  8:06 ` Baolin Wang
  -1 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:06 UTC (permalink / raw)
  To: Dan Williams, Vinod Koul, vkoul
  Cc: Eric Long, Mark Brown, Baolin Wang, dmaengine, LKML

Adding Vinod's new email.

On 4 May 2018 at 16:01, Baolin Wang <baolin.wang@linaro.org> wrote:
> From: Eric Long <eric.long@spreadtrum.com>
>
> This patch adds the 'device_config' and 'device_prep_slave_sg' interfaces
> for users to configure DMA, as well as adding one 'struct sprd_dma_config'
> structure to save Spreadtrum DMA configuration for each DMA channel.
>
> Signed-off-by: Eric Long <eric.long@spreadtrum.com>
> Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
> ---
>  drivers/dma/sprd-dma.c       |  215 ++++++++++++++++++++++++++++++++++++++++++
>  include/linux/dma/sprd-dma.h |    4 +
>  2 files changed, 219 insertions(+)
>
> diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
> index a7a89fd..d7c7ffa 100644
> --- a/drivers/dma/sprd-dma.c
> +++ b/drivers/dma/sprd-dma.c
> @@ -100,6 +100,8 @@
>  #define SPRD_DMA_DES_DATAWIDTH_OFFSET  28
>  #define SPRD_DMA_SWT_MODE_OFFSET       26
>  #define SPRD_DMA_REQ_MODE_OFFSET       24
> +#define SPRD_DMA_WRAP_SEL_OFFSET       23
> +#define SPRD_DMA_WRAP_EN_OFFSET                22
>  #define SPRD_DMA_REQ_MODE_MASK         GENMASK(1, 0)
>  #define SPRD_DMA_FIX_SEL_OFFSET                21
>  #define SPRD_DMA_FIX_EN_OFFSET         20
> @@ -154,6 +156,35 @@ struct sprd_dma_chn_hw {
>         u32 des_blk_step;
>  };
>
> +/*
> + * struct sprd_dma_config - DMA configuration structure
> + * @cfg: dma slave channel runtime config
> + * @src_addr: the source physical address
> + * @dst_addr: the destination physical address
> + * @block_len: specify one block transfer length
> + * @transcation_len: specify one transcation transfer length
> + * @src_step: source transfer step
> + * @dst_step: destination transfer step
> + * @wrap_ptr: wrap pointer address, once the transfer address reaches the
> + * 'wrap_ptr', the next transfer address will jump to the 'wrap_to' address.
> + * @wrap_to: wrap jump to address
> + * @req_mode: specify the DMA request mode
> + * @int_mode: specify the DMA interrupt type
> + */
> +struct sprd_dma_config {
> +       struct dma_slave_config cfg;
> +       phys_addr_t src_addr;
> +       phys_addr_t dst_addr;
> +       u32 block_len;
> +       u32 transcation_len;
> +       u32 src_step;
> +       u32 dst_step;
> +       phys_addr_t wrap_ptr;
> +       phys_addr_t wrap_to;
> +       enum sprd_dma_req_mode req_mode;
> +       enum sprd_dma_int_type int_mode;
> +};
> +
>  /* dma request description */
>  struct sprd_dma_desc {
>         struct virt_dma_desc    vd;
> @@ -164,6 +195,7 @@ struct sprd_dma_desc {
>  struct sprd_dma_chn {
>         struct virt_dma_chan    vc;
>         void __iomem            *chn_base;
> +       struct sprd_dma_config  slave_cfg;
>         u32                     chn_num;
>         u32                     dev_id;
>         struct sprd_dma_desc    *cur_desc;
> @@ -552,6 +584,121 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         spin_unlock_irqrestore(&schan->vc.lock, flags);
>  }
>
> +static enum sprd_dma_datawidth
> +sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
> +{
> +       switch (buswidth) {
> +       case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +               return SPRD_DMA_DATAWIDTH_1_BYTE;
> +
> +       case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +               return SPRD_DMA_DATAWIDTH_2_BYTES;
> +
> +       case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +               return SPRD_DMA_DATAWIDTH_4_BYTES;
> +
> +       case DMA_SLAVE_BUSWIDTH_8_BYTES:
> +               return SPRD_DMA_DATAWIDTH_8_BYTES;
> +
> +       default:
> +               return SPRD_DMA_DATAWIDTH_4_BYTES;
> +       }
> +}
> +
> +static u32 sprd_dma_get_step(enum sprd_dma_datawidth datawidth)
> +{
> +       switch (datawidth) {
> +       case SPRD_DMA_DATAWIDTH_1_BYTE:
> +               return SPRD_DMA_BYTE_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_2_BYTES:
> +               return SPRD_DMA_SHORT_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_4_BYTES:
> +               return SPRD_DMA_WORD_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_8_BYTES:
> +               return SPRD_DMA_DWORD_STEP;
> +
> +       default:
> +               return SPRD_DMA_DWORD_STEP;
> +       }
> +}
> +
> +static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
> +                          struct sprd_dma_config *slave_cfg)
> +{
> +       struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
> +       u32 fix_mode = 0, fix_en = 0, wrap_en = 0, wrap_mode = 0;
> +       u32 src_datawidth, dst_datawidth;
> +
> +       if (slave_cfg->cfg.slave_id)
> +               schan->dev_id = slave_cfg->cfg.slave_id;
> +
> +       hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
> +       hw->wrap_ptr = (u32)((slave_cfg->wrap_ptr & SPRD_DMA_LOW_ADDR_MASK) |
> +               ((slave_cfg->src_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
> +                SPRD_DMA_HIGH_ADDR_MASK));
> +       hw->wrap_to = (u32)((slave_cfg->wrap_to & SPRD_DMA_LOW_ADDR_MASK) |
> +               ((slave_cfg->dst_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
> +                SPRD_DMA_HIGH_ADDR_MASK));
> +
> +       hw->src_addr = (u32)(slave_cfg->src_addr & SPRD_DMA_LOW_ADDR_MASK);
> +       hw->des_addr = (u32)(slave_cfg->dst_addr & SPRD_DMA_LOW_ADDR_MASK);
> +
> +       if ((slave_cfg->src_step != 0 && slave_cfg->dst_step != 0)
> +           || (slave_cfg->src_step | slave_cfg->dst_step) == 0) {
> +               fix_en = 0;
> +       } else {
> +               fix_en = 1;
> +               if (slave_cfg->src_step)
> +                       fix_mode = 1;
> +               else
> +                       fix_mode = 0;
> +       }
> +
> +       if (slave_cfg->wrap_ptr && slave_cfg->wrap_to) {
> +               wrap_en = 1;
> +               if (slave_cfg->wrap_to == slave_cfg->src_addr) {
> +                       wrap_mode = 0;
> +               } else if (slave_cfg->wrap_to == slave_cfg->dst_addr) {
> +                       wrap_mode = 1;
> +               } else {
> +                       dev_err(sdev->dma_dev.dev, "invalid wrap mode\n");
> +                       return -EINVAL;
> +               }
> +       }
> +
> +       hw->intc = slave_cfg->int_mode | SPRD_DMA_CFG_ERR_INT_EN;
> +
> +       src_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.src_addr_width);
> +       dst_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.dst_addr_width);
> +       hw->frg_len = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
> +               dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
> +               slave_cfg->req_mode << SPRD_DMA_REQ_MODE_OFFSET |
> +               wrap_mode << SPRD_DMA_WRAP_SEL_OFFSET |
> +               wrap_en << SPRD_DMA_WRAP_EN_OFFSET |
> +               fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
> +               fix_en << SPRD_DMA_FIX_EN_OFFSET |
> +               (slave_cfg->cfg.src_maxburst & SPRD_DMA_FRG_LEN_MASK);
> +
> +       hw->blk_len = slave_cfg->block_len & SPRD_DMA_BLK_LEN_MASK;
> +
> +       hw->trsc_len = slave_cfg->transcation_len & SPRD_DMA_TRSC_LEN_MASK;
> +
> +       hw->trsf_step = (slave_cfg->dst_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       SPRD_DMA_DEST_TRSF_STEP_OFFSET |
> +                       (slave_cfg->src_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       SPRD_DMA_SRC_TRSF_STEP_OFFSET;
> +
> +       hw->frg_step = 0;
> +       hw->src_blk_step = 0;
> +       hw->des_blk_step = 0;
> +       return 0;
> +}
> +
>  static struct dma_async_tx_descriptor *
>  sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
>                          size_t len, unsigned long flags)
> @@ -606,6 +753,72 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
>  }
>
> +static struct dma_async_tx_descriptor *
> +sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> +                      unsigned int sglen, enum dma_transfer_direction dir,
> +                      unsigned long flags, void *context)
> +{
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
> +       struct sprd_dma_desc *sdesc;
> +       struct scatterlist *sg;
> +       int ret, i;
> +
> +       /* TODO: now we only support one sg for each DMA configuration. */
> +       if (!is_slave_direction(dir) || sglen > 1)
> +               return NULL;
> +
> +       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> +       if (!sdesc)
> +               return NULL;
> +
> +       for_each_sg(sgl, sg, sglen, i) {
> +               if (dir == DMA_MEM_TO_DEV) {
> +                       slave_cfg->src_addr = sg_dma_address(sg);
> +                       slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
> +                       slave_cfg->src_step =
> +                       sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
> +                       slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
> +               } else {
> +                       slave_cfg->src_addr = slave_cfg->cfg.src_addr;
> +                       slave_cfg->dst_addr = sg_dma_address(sg);
> +                       slave_cfg->src_step = SPRD_DMA_NONE_STEP;
> +                       slave_cfg->dst_step =
> +                       sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
> +               }
> +
> +               slave_cfg->block_len = sg_dma_len(sg);
> +               slave_cfg->transcation_len = sg_dma_len(sg);
> +       }
> +
> +       slave_cfg->req_mode =
> +               (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
> +       slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
> +
> +       ret = sprd_dma_config(chan, sdesc, slave_cfg);
> +       if (ret) {
> +               kfree(sdesc);
> +               return NULL;
> +       }
> +
> +       return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
> +}
> +
> +static int sprd_dma_slave_config(struct dma_chan *chan,
> +                                struct dma_slave_config *config)
> +{
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
> +
> +       if (!is_slave_direction(config->direction))
> +               return -EINVAL;
> +
> +       memset(slave_cfg, 0, sizeof(*slave_cfg));
> +       memcpy(&slave_cfg->cfg, config, sizeof(*config));
> +
> +       return 0;
> +}
> +
>  static int sprd_dma_pause(struct dma_chan *chan)
>  {
>         struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> @@ -733,6 +946,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
>         sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
>         sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
>         sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
> +       sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
> +       sdev->dma_dev.device_config = sprd_dma_slave_config;
>         sdev->dma_dev.device_pause = sprd_dma_pause;
>         sdev->dma_dev.device_resume = sprd_dma_resume;
>         sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
> diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
> index c545162..b0115e3 100644
> --- a/include/linux/dma/sprd-dma.h
> +++ b/include/linux/dma/sprd-dma.h
> @@ -3,6 +3,10 @@
>  #ifndef _SPRD_DMA_H_
>  #define _SPRD_DMA_H_
>
> +#define SPRD_DMA_REQ_SHIFT 16
> +#define SPRD_DMA_FLAGS(req_mode, int_type) \
> +       ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
> +
>  /*
>   * enum sprd_dma_req_mode: define the DMA request mode
>   * @SPRD_DMA_FRAG_REQ: fragment request mode
> --
> 1.7.9.5
>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
@ 2018-05-04  8:06 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-04  8:06 UTC (permalink / raw)
  To: Dan Williams, Vinod Koul, vkoul
  Cc: Eric Long, Mark Brown, Baolin Wang, dmaengine, LKML

Adding Vinod's new email address.

On 4 May 2018 at 16:01, Baolin Wang <baolin.wang@linaro.org> wrote:
> From: Eric Long <eric.long@spreadtrum.com>
>
> This patch adds the 'device_config' and 'device_prep_slave_sg' interfaces
> for users to configure DMA, as well as adding one 'struct sprd_dma_config'
> structure to save Spreadtrum DMA configuration for each DMA channel.
>
> Signed-off-by: Eric Long <eric.long@spreadtrum.com>
> Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
> ---
>  drivers/dma/sprd-dma.c       |  215 ++++++++++++++++++++++++++++++++++++++++++
>  include/linux/dma/sprd-dma.h |    4 +
>  2 files changed, 219 insertions(+)
>
> diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
> index a7a89fd..d7c7ffa 100644
> --- a/drivers/dma/sprd-dma.c
> +++ b/drivers/dma/sprd-dma.c
> @@ -100,6 +100,8 @@
>  #define SPRD_DMA_DES_DATAWIDTH_OFFSET  28
>  #define SPRD_DMA_SWT_MODE_OFFSET       26
>  #define SPRD_DMA_REQ_MODE_OFFSET       24
> +#define SPRD_DMA_WRAP_SEL_OFFSET       23
> +#define SPRD_DMA_WRAP_EN_OFFSET                22
>  #define SPRD_DMA_REQ_MODE_MASK         GENMASK(1, 0)
>  #define SPRD_DMA_FIX_SEL_OFFSET                21
>  #define SPRD_DMA_FIX_EN_OFFSET         20
> @@ -154,6 +156,35 @@ struct sprd_dma_chn_hw {
>         u32 des_blk_step;
>  };
>
> +/*
> + * struct sprd_dma_config - DMA configuration structure
> + * @cfg: dma slave channel runtime config
> + * @src_addr: the source physical address
> + * @dst_addr: the destination physical address
> + * @block_len: specify one block transfer length
> + * @transcation_len: specify one transcation transfer length
> + * @src_step: source transfer step
> + * @dst_step: destination transfer step
> + * @wrap_ptr: wrap pointer address, once the transfer address reaches the
> + * 'wrap_ptr', the next transfer address will jump to the 'wrap_to' address.
> + * @wrap_to: wrap jump to address
> + * @req_mode: specify the DMA request mode
> + * @int_mode: specify the DMA interrupt type
> + */
> +struct sprd_dma_config {
> +       struct dma_slave_config cfg;
> +       phys_addr_t src_addr;
> +       phys_addr_t dst_addr;
> +       u32 block_len;
> +       u32 transcation_len;
> +       u32 src_step;
> +       u32 dst_step;
> +       phys_addr_t wrap_ptr;
> +       phys_addr_t wrap_to;
> +       enum sprd_dma_req_mode req_mode;
> +       enum sprd_dma_int_type int_mode;
> +};
> +
>  /* dma request description */
>  struct sprd_dma_desc {
>         struct virt_dma_desc    vd;
> @@ -164,6 +195,7 @@ struct sprd_dma_desc {
>  struct sprd_dma_chn {
>         struct virt_dma_chan    vc;
>         void __iomem            *chn_base;
> +       struct sprd_dma_config  slave_cfg;
>         u32                     chn_num;
>         u32                     dev_id;
>         struct sprd_dma_desc    *cur_desc;
> @@ -552,6 +584,121 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         spin_unlock_irqrestore(&schan->vc.lock, flags);
>  }
>
> +static enum sprd_dma_datawidth
> +sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
> +{
> +       switch (buswidth) {
> +       case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +               return SPRD_DMA_DATAWIDTH_1_BYTE;
> +
> +       case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +               return SPRD_DMA_DATAWIDTH_2_BYTES;
> +
> +       case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +               return SPRD_DMA_DATAWIDTH_4_BYTES;
> +
> +       case DMA_SLAVE_BUSWIDTH_8_BYTES:
> +               return SPRD_DMA_DATAWIDTH_8_BYTES;
> +
> +       default:
> +               return SPRD_DMA_DATAWIDTH_4_BYTES;
> +       }
> +}
> +
> +static u32 sprd_dma_get_step(enum sprd_dma_datawidth datawidth)
> +{
> +       switch (datawidth) {
> +       case SPRD_DMA_DATAWIDTH_1_BYTE:
> +               return SPRD_DMA_BYTE_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_2_BYTES:
> +               return SPRD_DMA_SHORT_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_4_BYTES:
> +               return SPRD_DMA_WORD_STEP;
> +
> +       case SPRD_DMA_DATAWIDTH_8_BYTES:
> +               return SPRD_DMA_DWORD_STEP;
> +
> +       default:
> +               return SPRD_DMA_DWORD_STEP;
> +       }
> +}
> +
> +static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
> +                          struct sprd_dma_config *slave_cfg)
> +{
> +       struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
> +       u32 fix_mode = 0, fix_en = 0, wrap_en = 0, wrap_mode = 0;
> +       u32 src_datawidth, dst_datawidth;
> +
> +       if (slave_cfg->cfg.slave_id)
> +               schan->dev_id = slave_cfg->cfg.slave_id;
> +
> +       hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
> +       hw->wrap_ptr = (u32)((slave_cfg->wrap_ptr & SPRD_DMA_LOW_ADDR_MASK) |
> +               ((slave_cfg->src_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
> +                SPRD_DMA_HIGH_ADDR_MASK));
> +       hw->wrap_to = (u32)((slave_cfg->wrap_to & SPRD_DMA_LOW_ADDR_MASK) |
> +               ((slave_cfg->dst_addr >> SPRD_DMA_HIGH_ADDR_OFFSET) &
> +                SPRD_DMA_HIGH_ADDR_MASK));
> +
> +       hw->src_addr = (u32)(slave_cfg->src_addr & SPRD_DMA_LOW_ADDR_MASK);
> +       hw->des_addr = (u32)(slave_cfg->dst_addr & SPRD_DMA_LOW_ADDR_MASK);
> +
> +       if ((slave_cfg->src_step != 0 && slave_cfg->dst_step != 0)
> +           || (slave_cfg->src_step | slave_cfg->dst_step) == 0) {
> +               fix_en = 0;
> +       } else {
> +               fix_en = 1;
> +               if (slave_cfg->src_step)
> +                       fix_mode = 1;
> +               else
> +                       fix_mode = 0;
> +       }
> +
> +       if (slave_cfg->wrap_ptr && slave_cfg->wrap_to) {
> +               wrap_en = 1;
> +               if (slave_cfg->wrap_to == slave_cfg->src_addr) {
> +                       wrap_mode = 0;
> +               } else if (slave_cfg->wrap_to == slave_cfg->dst_addr) {
> +                       wrap_mode = 1;
> +               } else {
> +                       dev_err(sdev->dma_dev.dev, "invalid wrap mode\n");
> +                       return -EINVAL;
> +               }
> +       }
> +
> +       hw->intc = slave_cfg->int_mode | SPRD_DMA_CFG_ERR_INT_EN;
> +
> +       src_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.src_addr_width);
> +       dst_datawidth = sprd_dma_get_datawidth(slave_cfg->cfg.dst_addr_width);
> +       hw->frg_len = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
> +               dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
> +               slave_cfg->req_mode << SPRD_DMA_REQ_MODE_OFFSET |
> +               wrap_mode << SPRD_DMA_WRAP_SEL_OFFSET |
> +               wrap_en << SPRD_DMA_WRAP_EN_OFFSET |
> +               fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
> +               fix_en << SPRD_DMA_FIX_EN_OFFSET |
> +               (slave_cfg->cfg.src_maxburst & SPRD_DMA_FRG_LEN_MASK);
> +
> +       hw->blk_len = slave_cfg->block_len & SPRD_DMA_BLK_LEN_MASK;
> +
> +       hw->trsc_len = slave_cfg->transcation_len & SPRD_DMA_TRSC_LEN_MASK;
> +
> +       hw->trsf_step = (slave_cfg->dst_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       SPRD_DMA_DEST_TRSF_STEP_OFFSET |
> +                       (slave_cfg->src_step & SPRD_DMA_TRSF_STEP_MASK) <<
> +                       SPRD_DMA_SRC_TRSF_STEP_OFFSET;
> +
> +       hw->frg_step = 0;
> +       hw->src_blk_step = 0;
> +       hw->des_blk_step = 0;
> +       return 0;
> +}
> +
>  static struct dma_async_tx_descriptor *
>  sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
>                          size_t len, unsigned long flags)
> @@ -606,6 +753,72 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
>         return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
>  }
>
> +static struct dma_async_tx_descriptor *
> +sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> +                      unsigned int sglen, enum dma_transfer_direction dir,
> +                      unsigned long flags, void *context)
> +{
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
> +       struct sprd_dma_desc *sdesc;
> +       struct scatterlist *sg;
> +       int ret, i;
> +
> +       /* TODO: now we only support one sg for each DMA configuration. */
> +       if (!is_slave_direction(dir) || sglen > 1)
> +               return NULL;
> +
> +       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
> +       if (!sdesc)
> +               return NULL;
> +
> +       for_each_sg(sgl, sg, sglen, i) {
> +               if (dir == DMA_MEM_TO_DEV) {
> +                       slave_cfg->src_addr = sg_dma_address(sg);
> +                       slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
> +                       slave_cfg->src_step =
> +                       sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
> +                       slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
> +               } else {
> +                       slave_cfg->src_addr = slave_cfg->cfg.src_addr;
> +                       slave_cfg->dst_addr = sg_dma_address(sg);
> +                       slave_cfg->src_step = SPRD_DMA_NONE_STEP;
> +                       slave_cfg->dst_step =
> +                       sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
> +               }
> +
> +               slave_cfg->block_len = sg_dma_len(sg);
> +               slave_cfg->transcation_len = sg_dma_len(sg);
> +       }
> +
> +       slave_cfg->req_mode =
> +               (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
> +       slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
> +
> +       ret = sprd_dma_config(chan, sdesc, slave_cfg);
> +       if (ret) {
> +               kfree(sdesc);
> +               return NULL;
> +       }
> +
> +       return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
> +}
> +
> +static int sprd_dma_slave_config(struct dma_chan *chan,
> +                                struct dma_slave_config *config)
> +{
> +       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> +       struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
> +
> +       if (!is_slave_direction(config->direction))
> +               return -EINVAL;
> +
> +       memset(slave_cfg, 0, sizeof(*slave_cfg));
> +       memcpy(&slave_cfg->cfg, config, sizeof(*config));
> +
> +       return 0;
> +}
> +
>  static int sprd_dma_pause(struct dma_chan *chan)
>  {
>         struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
> @@ -733,6 +946,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
>         sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
>         sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
>         sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
> +       sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
> +       sdev->dma_dev.device_config = sprd_dma_slave_config;
>         sdev->dma_dev.device_pause = sprd_dma_pause;
>         sdev->dma_dev.device_resume = sprd_dma_resume;
>         sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
> diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
> index c545162..b0115e3 100644
> --- a/include/linux/dma/sprd-dma.h
> +++ b/include/linux/dma/sprd-dma.h
> @@ -3,6 +3,10 @@
>  #ifndef _SPRD_DMA_H_
>  #define _SPRD_DMA_H_
>
> +#define SPRD_DMA_REQ_SHIFT 16
> +#define SPRD_DMA_FLAGS(req_mode, int_type) \
> +       ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
> +
>  /*
>   * enum sprd_dma_req_mode: define the DMA request mode
>   * @SPRD_DMA_FRAG_REQ: fragment request mode
> --
> 1.7.9.5
>



-- 
Baolin.wang
Best Regards

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
  2018-05-04  8:01 ` [PATCH 2/2] " Baolin Wang
@ 2018-05-05  5:54 ` kbuild test robot
  -1 siblings, 0 replies; 12+ messages in thread
From: kbuild test robot @ 2018-05-05  5:54 UTC (permalink / raw)
  To: Baolin Wang
  Cc: kbuild-all, dan.j.williams, vinod.koul, eric.long, broonie,
	dmaengine, linux-kernel

Hi Eric,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on slave-dma/next]
[also build test WARNING on next-20180504]
[cannot apply to linus/master v4.17-rc3]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Baolin-Wang/dmaengine-sprd-Optimize-the-sprd_dma_prep_dma_memcpy/20180505-071137
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/slave-dma.git next
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

>> drivers/dma/sprd-dma.c:780:57: sparse: mixing different enum types
   drivers/dma/sprd-dma.c:780:57:     int enum dma_slave_buswidth  versus
   drivers/dma/sprd-dma.c:780:57:     int enum sprd_dma_datawidth
   drivers/dma/sprd-dma.c:787:57: sparse: mixing different enum types
   drivers/dma/sprd-dma.c:787:57:     int enum dma_slave_buswidth  versus
   drivers/dma/sprd-dma.c:787:57:     int enum sprd_dma_datawidth

vim +780 drivers/dma/sprd-dma.c

   755	
   756	static struct dma_async_tx_descriptor *
   757	sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
   758			       unsigned int sglen, enum dma_transfer_direction dir,
   759			       unsigned long flags, void *context)
   760	{
   761		struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
   762		struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
   763		struct sprd_dma_desc *sdesc;
   764		struct scatterlist *sg;
   765		int ret, i;
   766	
   767		/* TODO: now we only support one sg for each DMA configuration. */
   768		if (!is_slave_direction(dir) || sglen > 1)
   769			return NULL;
   770	
   771		sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
   772		if (!sdesc)
   773			return NULL;
   774	
   775		for_each_sg(sgl, sg, sglen, i) {
   776			if (dir == DMA_MEM_TO_DEV) {
   777				slave_cfg->src_addr = sg_dma_address(sg);
   778				slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
   779				slave_cfg->src_step =
 > 780				sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
   781				slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
   782			} else {
   783				slave_cfg->src_addr = slave_cfg->cfg.src_addr;
   784				slave_cfg->dst_addr = sg_dma_address(sg);
   785				slave_cfg->src_step = SPRD_DMA_NONE_STEP;
   786				slave_cfg->dst_step =
   787				sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
   788			}
   789	
   790			slave_cfg->block_len = sg_dma_len(sg);
   791			slave_cfg->transcation_len = sg_dma_len(sg);
   792		}
   793	
   794		slave_cfg->req_mode =
   795			(flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
   796		slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
   797	
   798		ret = sprd_dma_config(chan, sdesc, slave_cfg);
   799		if (ret) {
   800			kfree(sdesc);
   801			return NULL;
   802		}
   803	
   804		return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
   805	}
   806
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
--
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
@ 2018-05-05  5:54 ` kbuild test robot
  0 siblings, 0 replies; 12+ messages in thread
From: kbuild test robot @ 2018-05-05  5:54 UTC (permalink / raw)
  To: Baolin Wang
  Cc: kbuild-all, dan.j.williams, vinod.koul, eric.long, broonie,
	baolin.wang, dmaengine, linux-kernel

Hi Eric,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on slave-dma/next]
[also build test WARNING on next-20180504]
[cannot apply to linus/master v4.17-rc3]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Baolin-Wang/dmaengine-sprd-Optimize-the-sprd_dma_prep_dma_memcpy/20180505-071137
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/slave-dma.git next
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

>> drivers/dma/sprd-dma.c:780:57: sparse: mixing different enum types
   drivers/dma/sprd-dma.c:780:57:     int enum dma_slave_buswidth  versus
   drivers/dma/sprd-dma.c:780:57:     int enum sprd_dma_datawidth
   drivers/dma/sprd-dma.c:787:57: sparse: mixing different enum types
   drivers/dma/sprd-dma.c:787:57:     int enum dma_slave_buswidth  versus
   drivers/dma/sprd-dma.c:787:57:     int enum sprd_dma_datawidth

vim +780 drivers/dma/sprd-dma.c

   755	
   756	static struct dma_async_tx_descriptor *
   757	sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
   758			       unsigned int sglen, enum dma_transfer_direction dir,
   759			       unsigned long flags, void *context)
   760	{
   761		struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
   762		struct sprd_dma_config *slave_cfg = &schan->slave_cfg;
   763		struct sprd_dma_desc *sdesc;
   764		struct scatterlist *sg;
   765		int ret, i;
   766	
   767		/* TODO: now we only support one sg for each DMA configuration. */
   768		if (!is_slave_direction(dir) || sglen > 1)
   769			return NULL;
   770	
   771		sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
   772		if (!sdesc)
   773			return NULL;
   774	
   775		for_each_sg(sgl, sg, sglen, i) {
   776			if (dir == DMA_MEM_TO_DEV) {
   777				slave_cfg->src_addr = sg_dma_address(sg);
   778				slave_cfg->dst_addr = slave_cfg->cfg.dst_addr;
   779				slave_cfg->src_step =
 > 780				sprd_dma_get_step(slave_cfg->cfg.src_addr_width);
   781				slave_cfg->dst_step = SPRD_DMA_NONE_STEP;
   782			} else {
   783				slave_cfg->src_addr = slave_cfg->cfg.src_addr;
   784				slave_cfg->dst_addr = sg_dma_address(sg);
   785				slave_cfg->src_step = SPRD_DMA_NONE_STEP;
   786				slave_cfg->dst_step =
   787				sprd_dma_get_step(slave_cfg->cfg.dst_addr_width);
   788			}
   789	
   790			slave_cfg->block_len = sg_dma_len(sg);
   791			slave_cfg->transcation_len = sg_dma_len(sg);
   792		}
   793	
   794		slave_cfg->req_mode =
   795			(flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
   796		slave_cfg->int_mode = flags & SPRD_DMA_INT_MASK;
   797	
   798		ret = sprd_dma_config(chan, sdesc, slave_cfg);
   799		if (ret) {
   800			kfree(sdesc);
   801			return NULL;
   802		}
   803	
   804		return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
   805	}
   806	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
  2018-05-05  5:54 ` [PATCH 2/2] " kbuild test robot
@ 2018-05-07  7:58 ` Baolin Wang
  -1 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-07  7:58 UTC (permalink / raw)
  To: kbuild test robot
  Cc: kbuild-all, Dan Williams, Vinod Koul, Eric Long, Mark Brown,
	dmaengine, LKML

On 5 May 2018 at 13:54, kbuild test robot <lkp@intel.com> wrote:
> Hi Eric,
>
> Thank you for the patch! Perhaps something to improve:
>
> [auto build test WARNING on slave-dma/next]
> [also build test WARNING on next-20180504]
> [cannot apply to linus/master v4.17-rc3]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url:    https://github.com/0day-ci/linux/commits/Baolin-Wang/dmaengine-sprd-Optimize-the-sprd_dma_prep_dma_memcpy/20180505-071137
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/slave-dma.git next
> reproduce:
>         # apt-get install sparse
>         make ARCH=x86_64 allmodconfig
>         make C=1 CF=-D__CHECK_ENDIAN__
>
>
> sparse warnings: (new ones prefixed by >>)
>
>>> drivers/dma/sprd-dma.c:780:57: sparse: mixing different enum types
>    drivers/dma/sprd-dma.c:780:57:     int enum dma_slave_buswidth  versus
>    drivers/dma/sprd-dma.c:780:57:     int enum sprd_dma_datawidth
>    drivers/dma/sprd-dma.c:787:57: sparse: mixing different enum types
>    drivers/dma/sprd-dma.c:787:57:     int enum dma_slave_buswidth  versus
>    drivers/dma/sprd-dma.c:787:57:     int enum sprd_dma_datawidth
>
> vim +780 drivers/dma/sprd-dma.c

Sorry, it's a mistake. Eric will fix it in the next version.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] dmaengine: sprd: Add Spreadtrum DMA configuration
@ 2018-05-07  7:58 ` Baolin Wang
  0 siblings, 0 replies; 12+ messages in thread
From: Baolin Wang @ 2018-05-07  7:58 UTC (permalink / raw)
  To: kbuild test robot
  Cc: kbuild-all, Dan Williams, Vinod Koul, Eric Long, Mark Brown,
	dmaengine, LKML

On 5 May 2018 at 13:54, kbuild test robot <lkp@intel.com> wrote:
> Hi Eric,
>
> Thank you for the patch! Perhaps something to improve:
>
> [auto build test WARNING on slave-dma/next]
> [also build test WARNING on next-20180504]
> [cannot apply to linus/master v4.17-rc3]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url:    https://github.com/0day-ci/linux/commits/Baolin-Wang/dmaengine-sprd-Optimize-the-sprd_dma_prep_dma_memcpy/20180505-071137
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/slave-dma.git next
> reproduce:
>         # apt-get install sparse
>         make ARCH=x86_64 allmodconfig
>         make C=1 CF=-D__CHECK_ENDIAN__
>
>
> sparse warnings: (new ones prefixed by >>)
>
>>> drivers/dma/sprd-dma.c:780:57: sparse: mixing different enum types
>    drivers/dma/sprd-dma.c:780:57:     int enum dma_slave_buswidth  versus
>    drivers/dma/sprd-dma.c:780:57:     int enum sprd_dma_datawidth
>    drivers/dma/sprd-dma.c:787:57: sparse: mixing different enum types
>    drivers/dma/sprd-dma.c:787:57:     int enum dma_slave_buswidth  versus
>    drivers/dma/sprd-dma.c:787:57:     int enum sprd_dma_datawidth
>
> vim +780 drivers/dma/sprd-dma.c

Sorry, it's a mistake. Eric will fix it in the next version.

-- 
Baolin.wang
Best Regards

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2018-05-07  7:58 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-07  7:58 [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration Baolin Wang
2018-05-07  7:58 ` [PATCH 2/2] " Baolin Wang
  -- strict thread matches above, loose matches on Subject: below --
2018-05-05  5:54 [2/2] " kbuild test robot
2018-05-05  5:54 ` [PATCH 2/2] " kbuild test robot
2018-05-04  8:06 [2/2] " Baolin Wang
2018-05-04  8:06 ` [PATCH 2/2] " Baolin Wang
2018-05-04  8:06 [1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy() Baolin Wang
2018-05-04  8:06 ` [PATCH 1/2] " Baolin Wang
2018-05-04  8:01 [2/2] dmaengine: sprd: Add Spreadtrum DMA configuration Baolin Wang
2018-05-04  8:01 ` [PATCH 2/2] " Baolin Wang
2018-05-04  8:01 [1/2] dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy() Baolin Wang
2018-05-04  8:01 ` [PATCH 1/2] " Baolin Wang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.