* [PATCH v2 0/2] dmaengine: ti: edma: Polled completion support
@ 2019-05-21  7:59 Peter Ujfalusi
  2019-05-21  7:59 ` [PATCH v2 1/2] dmaengine: ti: edma: Clean up the 2x32bit array register accesses Peter Ujfalusi
  2019-05-21  7:59 ` [PATCH v2 2/2] dmaengine: ti: edma: Enable support for polled (memcpy) completion Peter Ujfalusi
  0 siblings, 2 replies; 4+ messages in thread
From: Peter Ujfalusi @ 2019-05-21  7:59 UTC (permalink / raw)
  To: vkoul; +Cc: dan.j.williams, dmaengine, linux-arm-kernel, linux-omap

Hi,

Changes since v1:
- Cleanup patch for the array register handling
- typo fixed in patch2 commit message

The code around the array register accesses was pretty confusing at first
look, so clean it up first, then use the cleaner approach in the polled
completion handling.

When a DMA client driver does not provide a callback for the completion
of a transfer (and/or does not set DMA_PREP_INTERRUPT) but instead polls
the status of the transfer (in case of a short memcpy, for example), we
will not get an interrupt for the completion of the transfer and will
not mark the transaction as done.

Check the event registers (ER and EER) and if the channel is inactive then
return with DMA_COMPLETE to let the client know that the transfer is
completed.
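
For reference, a minimal client-side sketch of how such a polled memcpy could
look with the generic dmaengine API (illustration only, not taken from an
in-tree driver; dst_dma, src_dma and len are placeholders, error handling is
omitted):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);

	/* No DMA_PREP_INTERRUPT in the flags: the client polls instead of
	 * relying on a completion callback. */
	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll until the driver reports the descriptor as complete */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();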

Regards,
Peter
---
Peter Ujfalusi (2):
  dmaengine: ti: edma: Clean up the 2x32bit array register accesses
  dmaengine: ti: edma: Enable support for polled (memcpy) completion

 drivers/dma/ti/edma.c | 129 ++++++++++++++++++++++++++----------------
 1 file changed, 81 insertions(+), 48 deletions(-)

-- 
Peter


* [PATCH v2 1/2] dmaengine: ti: edma: Clean up the 2x32bit array register accesses
  2019-05-21  7:59 [PATCH v2 0/2] dmaengine: ti: edma: Polled completion support Peter Ujfalusi
@ 2019-05-21  7:59 ` Peter Ujfalusi
  2019-05-21  8:01   ` Peter Ujfalusi
  2019-05-21  7:59 ` [PATCH v2 2/2] dmaengine: ti: edma: Enable support for polled (memcpy) completion Peter Ujfalusi
  1 sibling, 1 reply; 4+ messages in thread
From: Peter Ujfalusi @ 2019-05-21  7:59 UTC (permalink / raw)
  To: vkoul; +Cc: dan.j.williams, dmaengine, linux-arm-kernel, linux-omap

Introduce defines for getting the array index and the bit number within the
64bit array register pairs.
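
For example, with the new defines channel 37 resolves to the second 32-bit
register of a pair and bit 5 within it (the channel number here is only
illustrative):

	EDMA_REG_ARRAY_INDEX(37)	/* 37 >> 5 == 1 */
	EDMA_CHANNEL_BIT(37)		/* BIT(37 & 0x1f) == BIT(5) == 0x20 */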

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/edma.c | 106 ++++++++++++++++++++++++------------------
 1 file changed, 61 insertions(+), 45 deletions(-)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..a5822925a327 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -133,6 +133,17 @@
 #define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
 
+/*
+ * 64bit array registers are plit into two 32bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * bit 5 in the channel number tells the array index (0/1)
+ * bit 0-4 (0x1f) is the bit offset within the register
+ */
+#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
+
 /* PaRAM slots are laid out like this */
 struct edmacc_param {
 	u32 opt;
@@ -441,15 +452,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
 	if (enable) {
-		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
-					 BIT(channel & 0x1f));
-		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
-					 BIT(channel & 0x1f));
+		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
 	} else {
-		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
-					 BIT(channel & 0x1f));
+		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
 	}
 }
 
@@ -587,26 +597,26 @@ static void edma_start(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
 	if (!echan->hw_triggered) {
 		/* EDMA channels without event association */
-		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_ESR, j));
-		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_ESR, idx));
+		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
 	} else {
 		/* EDMA channel with event association */
-		dev_dbg(ecc->dev, "ER%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_ER, j));
+		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_ER, idx));
 		/* Clear any pending event or error */
-		edma_write_array(ecc, EDMA_ECR, j, mask);
-		edma_write_array(ecc, EDMA_EMCR, j, mask);
+		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 		/* Clear any SER */
-		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
-		dev_dbg(ecc->dev, "EER%d %08x\n", j,
-			edma_shadow0_read_array(ecc, SH_EER, j));
+		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+			edma_shadow0_read_array(ecc, SH_EER, idx));
 	}
 }
 
@@ -614,19 +624,19 @@ static void edma_stop(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
-	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
-	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-	edma_write_array(ecc, EDMA_EMCR, j, mask);
+	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 
 	/* clear possibly pending completion interrupt */
-	edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
 
-	dev_dbg(ecc->dev, "EER%d %08x\n", j,
-		edma_shadow0_read_array(ecc, SH_EER, j));
+	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+		edma_shadow0_read_array(ecc, SH_EER, idx));
 
 	/* REVISIT:  consider guarding against inappropriate event
 	 * chaining by overwriting with dummy_paramset.
@@ -640,45 +650,49 @@ static void edma_stop(struct edma_chan *echan)
 static void edma_pause(struct edma_chan *echan)
 {
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
 
-	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+	edma_shadow0_write_array(echan->ecc, SH_EECR,
+				 EDMA_REG_ARRAY_INDEX(channel),
+				 EDMA_CHANNEL_BIT(channel));
 }
 
 /* Re-enable EDMA hardware events on the specified channel.  */
 static void edma_resume(struct edma_chan *echan)
 {
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
 
-	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+	edma_shadow0_write_array(echan->ecc, SH_EESR,
+				 EDMA_REG_ARRAY_INDEX(channel),
+				 EDMA_CHANNEL_BIT(channel));
 }
 
 static void edma_trigger_channel(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
 
-	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
-		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+		edma_shadow0_read_array(ecc, SH_ESR, idx));
 }
 
 static void edma_clean_channel(struct edma_chan *echan)
 {
 	struct edma_cc *ecc = echan->ecc;
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
-	int j = (channel >> 5);
-	unsigned int mask = BIT(channel & 0x1f);
+	int idx = EDMA_REG_ARRAY_INDEX(channel);
+	int ch_bit = EDMA_CHANNEL_BIT(channel);
 
-	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
-	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+		edma_read_array(ecc, EDMA_EMR, idx));
+	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
 	/* Clear the corresponding EMR bits */
-	edma_write_array(ecc, EDMA_EMCR, j, mask);
+	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
 	/* Clear any SER */
-	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
 	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
 }
 
@@ -708,7 +722,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
 
 	/* ensure access through shadow region 0 */
-	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+		       EDMA_CHANNEL_BIT(channel));
 
 	/* ensure no events are pending */
 	edma_stop(echan);
@@ -2482,8 +2497,9 @@ static int edma_pm_resume(struct device *dev)
 	for (i = 0; i < ecc->num_channels; i++) {
 		if (echan[i].alloced) {
 			/* ensure access through shadow region 0 */
-			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
-				       BIT(i & 0x1f));
+			edma_or_array2(ecc, EDMA_DRAE, 0,
+				       EDMA_REG_ARRAY_INDEX(i),
+				       EDMA_CHANNEL_BIT(i));
 
 			edma_setup_interrupt(&echan[i], true);
 
-- 
Peter


* [PATCH v2 2/2] dmaengine: ti: edma: Enable support for polled (memcpy) completion
  2019-05-21  7:59 [PATCH v2 0/2] dmaengine: ti: edma: Polled completion support Peter Ujfalusi
  2019-05-21  7:59 ` [PATCH v2 1/2] dmaengine: ti: edma: Clean up the 2x32bit array register accesses Peter Ujfalusi
@ 2019-05-21  7:59 ` Peter Ujfalusi
  1 sibling, 0 replies; 4+ messages in thread
From: Peter Ujfalusi @ 2019-05-21  7:59 UTC (permalink / raw)
  To: vkoul; +Cc: dan.j.williams, dmaengine, linux-arm-kernel, linux-omap

When a DMA client driver does not provide a callback for the completion
of a transfer (and/or does not set DMA_PREP_INTERRUPT) but instead polls
the status of the transfer (in case of a short memcpy, for example), we
will not get an interrupt for the completion of the transfer and will
not mark the transaction as done.

Check the event registers (ER and EER) and if the channel is inactive then
return with DMA_COMPLETE to let the client know that the transfer is
completed.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/edma.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index a5822925a327..0f4873c2aa12 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1226,8 +1226,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
 	edesc->pset[0].param.opt |= ITCCHEN;
 	if (nslots == 1) {
-		/* Enable transfer complete interrupt */
-		edesc->pset[0].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[0].param.opt |= TCINTEN;
 	} else {
 		/* Enable transfer complete chaining for the first slot */
 		edesc->pset[0].param.opt |= TCCHEN;
@@ -1254,7 +1255,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		}
 
 		edesc->pset[1].param.opt |= ITCCHEN;
-		edesc->pset[1].param.opt |= TCINTEN;
+		/* Enable transfer complete interrupt if requested */
+		if (tx_flags & DMA_PREP_INTERRUPT)
+			edesc->pset[1].param.opt |= TCINTEN;
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -1816,6 +1819,20 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (ret != DMA_COMPLETE && echan->edesc && !echan->edesc->cyclic) {
+		struct edma_cc *ecc = echan->ecc;
+		int channel = EDMA_CHAN_SLOT(echan->ch_num);
+		int idx = EDMA_REG_ARRAY_INDEX(channel);
+		int ch_bit = EDMA_CHANNEL_BIT(channel);
+		unsigned int sh_er = edma_shadow0_read_array(ecc, SH_ER, idx);
+		unsigned int sh_eer = edma_shadow0_read_array(ecc, SH_EER, idx);
+
+		/* The channel is no longer active */
+		if (!(sh_er & ch_bit) && !(sh_eer & ch_bit))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-- 
Peter


* Re: [PATCH v2 1/2] dmaengine: ti: edma: Clean up the 2x32bit array register accesses
  2019-05-21  7:59 ` [PATCH v2 1/2] dmaengine: ti: edma: Clean up the 2x32bit array register accesses Peter Ujfalusi
@ 2019-05-21  8:01   ` Peter Ujfalusi
  0 siblings, 0 replies; 4+ messages in thread
From: Peter Ujfalusi @ 2019-05-21  8:01 UTC (permalink / raw)
  To: vkoul; +Cc: dan.j.williams, dmaengine, linux-arm-kernel, linux-omap



On 21/05/2019 10.59, Peter Ujfalusi wrote:
> Introduce defines for getting the array index and the bit number within the
> 64bit array register pairs.
> 
> Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
> ---
>  drivers/dma/ti/edma.c | 106 ++++++++++++++++++++++++------------------
>  1 file changed, 61 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
> index ceabdea40ae0..a5822925a327 100644
> --- a/drivers/dma/ti/edma.c
> +++ b/drivers/dma/ti/edma.c
> @@ -133,6 +133,17 @@
>  #define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
>  #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
>  
> +/*
> + * 64bit array registers are plit into two 32bit registers:

typo s/plit/split

> + * reg0: channel/event 0-31
> + * reg1: channel/event 32-63
> + *
> + * bit 5 in the channel number tells the array index (0/1)
> + * bit 0-4 (0x1f) is the bit offset within the register
> + */
> +#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
> +#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
> +
>  /* PaRAM slots are laid out like this */
>  struct edmacc_param {
>  	u32 opt;
> @@ -441,15 +452,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
>  {
>  	struct edma_cc *ecc = echan->ecc;
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> +	int idx = EDMA_REG_ARRAY_INDEX(channel);
> +	int ch_bit = EDMA_CHANNEL_BIT(channel);
>  
>  	if (enable) {
> -		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
> -					 BIT(channel & 0x1f));
> -		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
> -					 BIT(channel & 0x1f));
> +		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
> +		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
>  	} else {
> -		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
> -					 BIT(channel & 0x1f));
> +		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
>  	}
>  }
>  
> @@ -587,26 +597,26 @@ static void edma_start(struct edma_chan *echan)
>  {
>  	struct edma_cc *ecc = echan->ecc;
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	int j = (channel >> 5);
> -	unsigned int mask = BIT(channel & 0x1f);
> +	int idx = EDMA_REG_ARRAY_INDEX(channel);
> +	int ch_bit = EDMA_CHANNEL_BIT(channel);
>  
>  	if (!echan->hw_triggered) {
>  		/* EDMA channels without event association */
> -		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
> -			edma_shadow0_read_array(ecc, SH_ESR, j));
> -		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
> +		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
> +			edma_shadow0_read_array(ecc, SH_ESR, idx));
> +		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
>  	} else {
>  		/* EDMA channel with event association */
> -		dev_dbg(ecc->dev, "ER%d %08x\n", j,
> -			edma_shadow0_read_array(ecc, SH_ER, j));
> +		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
> +			edma_shadow0_read_array(ecc, SH_ER, idx));
>  		/* Clear any pending event or error */
> -		edma_write_array(ecc, EDMA_ECR, j, mask);
> -		edma_write_array(ecc, EDMA_EMCR, j, mask);
> +		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
> +		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
>  		/* Clear any SER */
> -		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
> -		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
> -		dev_dbg(ecc->dev, "EER%d %08x\n", j,
> -			edma_shadow0_read_array(ecc, SH_EER, j));
> +		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
> +		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
> +		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
> +			edma_shadow0_read_array(ecc, SH_EER, idx));
>  	}
>  }
>  
> @@ -614,19 +624,19 @@ static void edma_stop(struct edma_chan *echan)
>  {
>  	struct edma_cc *ecc = echan->ecc;
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	int j = (channel >> 5);
> -	unsigned int mask = BIT(channel & 0x1f);
> +	int idx = EDMA_REG_ARRAY_INDEX(channel);
> +	int ch_bit = EDMA_CHANNEL_BIT(channel);
>  
> -	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
> -	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
> -	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
> -	edma_write_array(ecc, EDMA_EMCR, j, mask);
> +	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
> +	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
> +	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
> +	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
>  
>  	/* clear possibly pending completion interrupt */
> -	edma_shadow0_write_array(ecc, SH_ICR, j, mask);
> +	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
>  
> -	dev_dbg(ecc->dev, "EER%d %08x\n", j,
> -		edma_shadow0_read_array(ecc, SH_EER, j));
> +	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
> +		edma_shadow0_read_array(ecc, SH_EER, idx));
>  
>  	/* REVISIT:  consider guarding against inappropriate event
>  	 * chaining by overwriting with dummy_paramset.
> @@ -640,45 +650,49 @@ static void edma_stop(struct edma_chan *echan)
>  static void edma_pause(struct edma_chan *echan)
>  {
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	unsigned int mask = BIT(channel & 0x1f);
>  
> -	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
> +	edma_shadow0_write_array(echan->ecc, SH_EECR,
> +				 EDMA_REG_ARRAY_INDEX(channel),
> +				 EDMA_CHANNEL_BIT(channel));
>  }
>  
>  /* Re-enable EDMA hardware events on the specified channel.  */
>  static void edma_resume(struct edma_chan *echan)
>  {
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	unsigned int mask = BIT(channel & 0x1f);
>  
> -	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
> +	edma_shadow0_write_array(echan->ecc, SH_EESR,
> +				 EDMA_REG_ARRAY_INDEX(channel),
> +				 EDMA_CHANNEL_BIT(channel));
>  }
>  
>  static void edma_trigger_channel(struct edma_chan *echan)
>  {
>  	struct edma_cc *ecc = echan->ecc;
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	unsigned int mask = BIT(channel & 0x1f);
> +	int idx = EDMA_REG_ARRAY_INDEX(channel);
> +	int ch_bit = EDMA_CHANNEL_BIT(channel);
>  
> -	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
> +	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
>  
> -	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
> -		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
> +	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
> +		edma_shadow0_read_array(ecc, SH_ESR, idx));
>  }
>  
>  static void edma_clean_channel(struct edma_chan *echan)
>  {
>  	struct edma_cc *ecc = echan->ecc;
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
> -	int j = (channel >> 5);
> -	unsigned int mask = BIT(channel & 0x1f);
> +	int idx = EDMA_REG_ARRAY_INDEX(channel);
> +	int ch_bit = EDMA_CHANNEL_BIT(channel);
>  
> -	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
> -	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
> +	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
> +		edma_read_array(ecc, EDMA_EMR, idx));
> +	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
>  	/* Clear the corresponding EMR bits */
> -	edma_write_array(ecc, EDMA_EMCR, j, mask);
> +	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
>  	/* Clear any SER */
> -	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
> +	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
>  	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
>  }
>  
> @@ -708,7 +722,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
>  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
>  
>  	/* ensure access through shadow region 0 */
> -	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
> +	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
> +		       EDMA_CHANNEL_BIT(channel));
>  
>  	/* ensure no events are pending */
>  	edma_stop(echan);
> @@ -2482,8 +2497,9 @@ static int edma_pm_resume(struct device *dev)
>  	for (i = 0; i < ecc->num_channels; i++) {
>  		if (echan[i].alloced) {
>  			/* ensure access through shadow region 0 */
> -			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
> -				       BIT(i & 0x1f));
> +			edma_or_array2(ecc, EDMA_DRAE, 0,
> +				       EDMA_REG_ARRAY_INDEX(i),
> +				       EDMA_CHANNEL_BIT(i));
>  
>  			edma_setup_interrupt(&echan[i], true);
>  
> 

- Péter

