From: Mans Rullgard <mans@mansr.com>
To: Viresh Kumar <vireshk@kernel.org>,
	Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
	Vinod Koul <vinod.koul@intel.com>,
	linux-kernel@vger.kernel.org, dmaengine@vger.kernel.org
Cc: Dan Williams <dan.j.williams@intel.com>
Subject: [PATCH 01/15] dmaengine: dw: fix byte order of hw descriptor fields
Date: Sun, 24 Jan 2016 19:21:48 +0000
Message-ID: <1453663322-14474-2-git-send-email-mans@mansr.com>
In-Reply-To: <1453663322-14474-1-git-send-email-mans@mansr.com>

If the DMA controller uses a different byte order than the host CPU,
the hardware linked list descriptor fields need to be byte-swapped.

This patch makes the driver write these fields using the same byte
order it uses for MMIO accesses to the DMA engine.  I do not know
whether this is guaranteed to be correct in all cases.

Signed-off-by: Mans Rullgard <mans@mansr.com>
---
 drivers/dma/dw/core.c | 105 ++++++++++++++++++++++++--------------------------
 drivers/dma/dw/regs.h |  32 +++++++++++----
 2 files changed, 76 insertions(+), 61 deletions(-)
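
For illustration only (not part of the patch): a minimal userspace
sketch of the endianness-aware accessor pattern introduced below, with
htobe32()/be32toh() from <endian.h> standing in for the kernel's
cpu_to_be32()/be32_to_cpu() helpers.  Stores swap from CPU order to
device order and loads swap back, so the driver logic always operates
on CPU-order values while the descriptor memory holds whatever byte
order the controller expects.

	/* Illustration only: userspace analogue of the lli_* accessors
	 * added to regs.h below, assuming a big-endian controller. */
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	/* One hardware-endian descriptor field. */
	struct lli {
		uint32_t sar;
	};

	/* Writes swap from CPU order to device order; reads swap back. */
	#define lli_write(d, reg, v)	((d)->reg = htobe32(v))
	#define lli_read(d, reg)	be32toh((d)->reg)

	int main(void)
	{
		struct lli d;

		lli_write(&d, sar, 0x12345678);
		printf("cpu view 0x%08x, raw storage 0x%08x\n",
		       (unsigned int)lli_read(&d, sar),
		       (unsigned int)d.sar);
		return 0;
	}

On a little-endian host this prints the CPU view as 0x12345678 and the
raw storage as 0x78563412; on a big-endian host both match.  This is
the behaviour the lli_read()/lli_write() macros in regs.h give the
driver regardless of host byte order.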

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..cc7c1acc8188 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -210,12 +210,12 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 	 * Software emulation of LLP mode relies on interrupts to continue
 	 * multi block transfer.
 	 */
-	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
 
-	channel_writel(dwc, SAR, desc->lli.sar);
-	channel_writel(dwc, DAR, desc->lli.dar);
+	channel_writel(dwc, SAR, lli_read(desc, sar));
+	channel_writel(dwc, DAR, lli_read(desc, dar));
 	channel_writel(dwc, CTL_LO, ctllo);
-	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
 	channel_set_bit(dw, CH_EN, dwc->mask);
 
 	/* Move pointer to next descriptor */
@@ -433,7 +433,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		}
 
 		/* Check first descriptors llp */
-		if (desc->lli.llp == llp) {
+		if (lli_read(desc, llp) == llp) {
 			/* This one is currently in progress */
 			dwc->residue -= dwc_get_sent(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
@@ -442,7 +442,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 		dwc->residue -= desc->len;
 		list_for_each_entry(child, &desc->tx_list, desc_node) {
-			if (child->lli.llp == llp) {
+			if (lli_read(child, llp) == llp) {
 				/* Currently in progress */
 				dwc->residue -= dwc_get_sent(dwc);
 				spin_unlock_irqrestore(&dwc->lock, flags);
@@ -744,25 +744,24 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		if (!desc)
 			goto err_desc_get;
 
-		desc->lli.sar = src + offset;
-		desc->lli.dar = dest + offset;
-		desc->lli.ctllo = ctllo;
-		desc->lli.ctlhi = xfer_count;
+		lli_write(desc, sar, src + offset);
+		lli_write(desc, dar, dest + offset);
+		lli_write(desc, ctllo, ctllo);
+		lli_write(desc, ctlhi, xfer_count);
 		desc->len = xfer_count << src_width;
 
 		if (!first) {
 			first = desc;
 		} else {
-			prev->lli.llp = desc->txd.phys;
-			list_add_tail(&desc->desc_node,
-					&first->tx_list);
+			lli_write(prev, llp, desc->txd.phys);
+			list_add_tail(&desc->desc_node, &first->tx_list);
 		}
 		prev = desc;
 	}
 
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
 
 	prev->lli.llp = 0;
 	first->txd.flags = flags;
@@ -832,9 +831,9 @@ slave_sg_todev_fill_desc:
 			if (!desc)
 				goto err_desc_get;
 
-			desc->lli.sar = mem;
-			desc->lli.dar = reg;
-			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+			lli_write(desc, sar, mem);
+			lli_write(desc, dar, reg);
+			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
 			if ((len >> mem_width) > dwc->block_size) {
 				dlen = dwc->block_size << mem_width;
 				mem += dlen;
@@ -844,15 +843,14 @@ slave_sg_todev_fill_desc:
 				len = 0;
 			}
 
-			desc->lli.ctlhi = dlen >> mem_width;
+			lli_write(desc, ctlhi, dlen >> mem_width);
 			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
-				prev->lli.llp = desc->txd.phys;
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
+				lli_write(prev, llp, desc->txd.phys);
+				list_add_tail(&desc->desc_node, &first->tx_list);
 			}
 			prev = desc;
 			total_len += dlen;
@@ -889,9 +887,9 @@ slave_sg_fromdev_fill_desc:
 			if (!desc)
 				goto err_desc_get;
 
-			desc->lli.sar = reg;
-			desc->lli.dar = mem;
-			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+			lli_write(desc, sar, reg);
+			lli_write(desc, dar, mem);
+			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
 			if ((len >> reg_width) > dwc->block_size) {
 				dlen = dwc->block_size << reg_width;
 				mem += dlen;
@@ -900,15 +898,14 @@ slave_sg_fromdev_fill_desc:
 				dlen = len;
 				len = 0;
 			}
-			desc->lli.ctlhi = dlen >> reg_width;
+			lli_write(desc, ctlhi, dlen >> reg_width);
 			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
-				prev->lli.llp = desc->txd.phys;
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
+				lli_write(prev, llp, desc->txd.phys);
+				list_add_tail(&desc->desc_node, &first->tx_list);
 			}
 			prev = desc;
 			total_len += dlen;
@@ -923,7 +920,7 @@ slave_sg_fromdev_fill_desc:
 
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
 
 	prev->lli.llp = 0;
 	first->total_len = total_len;
@@ -1388,50 +1385,50 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 
 		switch (direction) {
 		case DMA_MEM_TO_DEV:
-			desc->lli.dar = sconfig->dst_addr;
-			desc->lli.sar = buf_addr + (period_len * i);
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_FIX
-					| DWC_CTLL_SRC_INC
-					| DWC_CTLL_INT_EN);
+			lli_write(desc, dar, sconfig->dst_addr);
+			lli_write(desc, sar, buf_addr + period_len * i);
+			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_FIX
+				| DWC_CTLL_SRC_INC
+				| DWC_CTLL_INT_EN));
 
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+			lli_set(desc, ctllo, sconfig->device_fc ?
+					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+					DWC_CTLL_FC(DW_DMA_FC_D_M2P));
 
 			break;
 		case DMA_DEV_TO_MEM:
-			desc->lli.dar = buf_addr + (period_len * i);
-			desc->lli.sar = sconfig->src_addr;
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_DST_INC
-					| DWC_CTLL_SRC_FIX
-					| DWC_CTLL_INT_EN);
+			lli_write(desc, dar, buf_addr + period_len * i);
+			lli_write(desc, sar, sconfig->src_addr);
+			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_DST_INC
+				| DWC_CTLL_SRC_FIX
+				| DWC_CTLL_INT_EN));
 
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+			lli_set(desc, ctllo, sconfig->device_fc ?
+					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+					DWC_CTLL_FC(DW_DMA_FC_D_P2M));
 
 			break;
 		default:
 			break;
 		}
 
-		desc->lli.ctlhi = (period_len >> reg_width);
+		lli_write(desc, ctlhi, period_len >> reg_width);
 		cdesc->desc[i] = desc;
 
 		if (last)
-			last->lli.llp = desc->txd.phys;
+			lli_write(last, llp, desc->txd.phys);
 
 		last = desc;
 	}
 
 	/* Let's make a cyclic list */
-	last->lli.llp = cdesc->desc[0]->txd.phys;
+	lli_write(last, llp, cdesc->desc[0]->txd.phys);
 
 	dev_dbg(chan2dev(&dwc->chan),
 			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b1402b..afd340958266 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -308,26 +308,44 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
 	return container_of(ddev, struct dw_dma, dma);
 }
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+typedef __be32 __dw32;
+#else
+typedef __le32 __dw32;
+#endif
+
 /* LLI == Linked List Item; a.k.a. DMA block descriptor */
 struct dw_lli {
 	/* values that are not changed by hardware */
-	u32		sar;
-	u32		dar;
-	u32		llp;		/* chain to next lli */
-	u32		ctllo;
+	__dw32		sar;
+	__dw32		dar;
+	__dw32		llp;		/* chain to next lli */
+	__dw32		ctllo;
 	/* values that may get written back: */
-	u32		ctlhi;
+	__dw32		ctlhi;
 	/* sstat and dstat can snapshot peripheral register state.
 	 * silicon config may discard either or both...
 	 */
-	u32		sstat;
-	u32		dstat;
+	__dw32		sstat;
+	__dw32		dstat;
 };
 
 struct dw_desc {
 	/* FIRST values the hardware uses */
 	struct dw_lli			lli;
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_be32(v))
+#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_be32(v))
+#define lli_read(d, reg)		be32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_be32(v))
+#else
+#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_le32(v))
+#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_le32(v))
+#define lli_read(d, reg)		le32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_le32(v))
+#endif
+
 	/* THEN values for driver housekeeping */
 	struct list_head		desc_node;
 	struct list_head		tx_list;
-- 
2.7.0
