dmaengine Archive on lore.kernel.org
 help / color / Atom feed
* [PATCH] dmaengine: ti: edma: Support for interleaved mem to mem transfer
@ 2020-02-07 14:20 Peter Ujfalusi
  2020-02-07 15:10 ` Peter Ujfalusi
  0 siblings, 1 reply; 2+ messages in thread
From: Peter Ujfalusi @ 2020-02-07 14:20 UTC (permalink / raw)
  To: vkoul; +Cc: dmaengine, linux-kernel, dan.j.williams

Add basic interleaved support via EDMA.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/edma.c | 80 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 03a7f647f7b2..c291e72260bd 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1275,6 +1275,82 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long tx_flags)
+{
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct edmacc_param *param;
+	struct edma_desc *edesc;
+	size_t src_icg, dst_icg;
+	int src_bidx, dst_bidx;
+
+	/* Slave mode is not supported */
+	if (is_slave_direction(xt->dir))
+		return NULL;
+
+	if (xt->frame_size != 1 || xt->numf == 0)
+		return NULL;
+
+	/* ACNT and BCNT are 16 bits wide, so the maximum is SZ_64K - 1 */
+	if (xt->sgl[0].size >= SZ_64K || xt->numf >= SZ_64K)
+		return NULL;
+
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+	if (src_icg) {
+		src_bidx = src_icg + xt->sgl[0].size;
+	} else if (xt->src_inc) {
+		src_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: SRC constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	if (dst_icg) {
+		dst_bidx = dst_icg + xt->sgl[0].size;
+	} else if (xt->dst_inc) {
+		dst_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: DST constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	/* SRCBIDX/DSTBIDX are 16 bits wide as well */
+	if (src_bidx >= SZ_64K || dst_bidx >= SZ_64K)
+		return NULL;
+
+	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+	if (!edesc)
+		return NULL;
+
+	edesc->direction = DMA_MEM_TO_MEM;
+	edesc->echan = echan;
+	edesc->pset_nr = 1;
+
+	param = &edesc->pset[0].param;
+
+	param->src = xt->src_start;
+	param->dst = xt->dst_start;
+	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+	param->ccnt = 1;
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = 0;
+	param->link_bcntrld = 0xffff; /* BCNTRLD = 0 (CCNT is 1), LINK = no link */
+
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt |= ITCCHEN;
+	/* Enable transfer complete interrupt if requested */
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		param->opt |= TCINTEN;
+	else
+		edesc->polled = true;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
@@ -1917,7 +1993,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 			 "Legacy memcpy is enabled, things might not work\n");
 
 		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
 		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
 	}
 
@@ -1953,8 +2031,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 
 		dma_cap_zero(m_ddev->cap_mask);
 		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
 
 		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
 		m_ddev->device_free_chan_resources = edma_free_chan_resources;
 		m_ddev->device_issue_pending = edma_issue_pending;
-- 
Peter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki


^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH] dmaengine: ti: edma: Support for interleaved mem to mem transfer
  2020-02-07 14:20 [PATCH] dmaengine: ti: edma: Support for interleaved mem to mem transfer Peter Ujfalusi
@ 2020-02-07 15:10 ` Peter Ujfalusi
  0 siblings, 0 replies; 2+ messages in thread
From: Peter Ujfalusi @ 2020-02-07 15:10 UTC (permalink / raw)
  To: vkoul; +Cc: dmaengine, linux-kernel, dan.j.williams

Hi,

On 07/02/2020 16.20, Peter Ujfalusi wrote:
> Add basic interleaved support via EDMA.
> 
> Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
> ---
>  drivers/dma/ti/edma.c | 80 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 80 insertions(+)
> 
> diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
> index 03a7f647f7b2..c291e72260bd 100644
> --- a/drivers/dma/ti/edma.c
> +++ b/drivers/dma/ti/edma.c
> @@ -1275,6 +1275,82 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
>  }
>  
> +static struct dma_async_tx_descriptor *
> +edma_prep_dma_interleaved(struct dma_chan *chan,
> +			  struct dma_interleaved_template *xt,
> +			  unsigned long tx_flags)
> +{
> +	struct device *dev = chan->device->dev;
> +	struct edma_chan *echan = to_edma_chan(chan);
> +	struct edmacc_param *param;
> +	struct edma_desc *edesc;
> +	size_t src_icg, dst_icg;
> +	int src_bidx, dst_bidx;
> +
> +	/* Slave mode is not supported */
> +	if (is_slave_direction(xt->dir))
> +		return NULL;
> +
> +	if (xt->frame_size != 1 || xt->numf == 0)
> +		return NULL;
> +
> +	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
> +		return NULL;
> +
> +	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
> +	if (src_icg) {
> +		src_bidx = src_icg + xt->sgl[0].size;
> +	} else if (xt->src_inc) {
> +		src_bidx = xt->sgl[0].size;
> +	} else {
> +		dev_err(dev, "%s: SRC constant addressing is not supported\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
> +	if (dst_icg) {
> +		dst_bidx = dst_icg + xt->sgl[0].size;
> +	} else if (xt->dst_inc) {
> +		dst_bidx = xt->sgl[0].size;
> +	} else {
> +		dev_err(dev, "%s: DST constant addressing is not supported\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
> +		return NULL;
> +
> +	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
> +	if (!edesc)
> +		return NULL;
> +
> +	edesc->direction = DMA_MEM_TO_MEM;
> +	edesc->echan = echan;
> +	edesc->pset_nr = 1;
> +
> +	param = &edesc->pset[0].param;
> +
> +	param->src = xt->src_start;
> +	param->dst = xt->dst_start;
> +	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
> +	param->ccnt = 1;
> +	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
> +	param->src_dst_cidx = 0;
> +	param->link_bcntrld = 0xffffffff;

The BCNTRLD should be 0 and only the link needs to be 0xffff.
BCNTRLD basically a don't care in this setup as CCNT is 1, but to be
precise it is better to leave it as 0.

I'll resend the patch on Monday.

> +
> +	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
> +	param->opt |= ITCCHEN;
> +	/* Enable transfer complete interrupt if requested */
> +	if (tx_flags & DMA_PREP_INTERRUPT)
> +		param->opt |= TCINTEN;
> +	else
> +		edesc->polled = true;
> +
> +	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
> +}
> +
>  static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
>  	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
>  	size_t period_len, enum dma_transfer_direction direction,
> @@ -1917,7 +1993,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
>  			 "Legacy memcpy is enabled, things might not work\n");
>  
>  		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
> +		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
>  		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
> +		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
>  		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
>  	}
>  
> @@ -1953,8 +2031,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
>  
>  		dma_cap_zero(m_ddev->cap_mask);
>  		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
> +		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
>  
>  		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
> +		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
>  		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
>  		m_ddev->device_free_chan_resources = edma_free_chan_resources;
>  		m_ddev->device_issue_pending = edma_issue_pending;
> 

- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, back to index

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-02-07 14:20 [PATCH] dmaengine: ti: edma: Support for interleaved mem to mem transfer Peter Ujfalusi
2020-02-07 15:10 ` Peter Ujfalusi

dmaengine Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/dmaengine/0 dmaengine/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dmaengine dmaengine/ https://lore.kernel.org/dmaengine \
		dmaengine@vger.kernel.org
	public-inbox-index dmaengine

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.kernel.vger.dmaengine


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git