* [RFC] dmaengine: Add DW AXI DMAC driver
@ 2017-01-20 10:50 Eugeniy Paltsev
  2017-01-20 13:38 ` Andy Shevchenko
  0 siblings, 1 reply; 4+ messages in thread
From: Eugeniy Paltsev @ 2017-01-20 10:50 UTC (permalink / raw)
  To: dmaengine
  Cc: linux-kernel, Vinod Koul, Dan Williams, Andy Shevchenko,
	Alexey Brodkin, Eugeniy Paltsev

This patch adds support for the DW AXI DMAC controller.

DW AXI DMAC is a part of an upcoming development board from Synopsys.

In this driver implementation only DMA_MEMCPY and DMA_SG transfers
are supported.

Note: there is no DT documentation in this patch yet, but it will
be added in the near future.
---
 drivers/dma/axi_dma_platform.c     | 1107 ++++++++++++++++++++++++++++++++++++
 drivers/dma/axi_dma_platform.h     |  128 +++++
 drivers/dma/axi_dma_platform_reg.h |  189 ++++++
 3 files changed, 1424 insertions(+)
 create mode 100644 drivers/dma/axi_dma_platform.c
 create mode 100644 drivers/dma/axi_dma_platform.h
 create mode 100644 drivers/dma/axi_dma_platform_reg.h

diff --git a/drivers/dma/axi_dma_platform.c b/drivers/dma/axi_dma_platform.c
new file mode 100644
index 0000000..05d9e8c
--- /dev/null
+++ b/drivers/dma/axi_dma_platform.c
@@ -0,0 +1,1107 @@
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/err.h>
+
+#include "axi_dma_platform.h"
+#include "axi_dma_platform_reg.h"
+#include "dmaengine.h"
+
+#define DRV_NAME	"axi_dw_dmac"
+
+/*
+ * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
+ * master data bus width up to 512 bits (for both AXI master interfaces), but
+ * it depends on IP block configuration.
+ */
+#define AXI_DMA_BUSWIDTHS		  \
+	(DMA_SLAVE_BUSWIDTH_UNDEFINED	| \
+	DMA_SLAVE_BUSWIDTH_1_BYTE	| \
+	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
+	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
+	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
+	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
+	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
+	DMA_SLAVE_BUSWIDTH_64_BYTES)
+/* TODO: check: do we need to use BIT() macro here? */
+
+/* NOTE: DW AXI DMAC theoretically can be configured with BE IO */
+static inline void
+axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
+{
+	iowrite32(val, chip->regs + reg);
+}
+
+static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
+{
+	return ioread32(chip->regs + reg);
+}
+
+static inline void
+axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
+{
+	iowrite32(val, chan->chan_regs + reg);
+}
+
+static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
+{
+	return ioread32(chan->chan_regs + reg);
+}
+
+static inline void axi_dma_disable(struct axi_dma_chip *chip)
+{
+	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
+	val &= ~DMAC_EN_MASK;
+	axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_enable(struct axi_dma_chip *chip)
+{
+	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
+	val |= DMAC_EN_MASK;
+	axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
+{
+	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
+	val &= ~INT_EN_MASK;
+	axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
+{
+	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
+	val |= INT_EN_MASK;
+	axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
+{
+	u32 val;
+
+	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
+		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
+	} else {
+		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
+		val &= ~irq_mask;
+		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
+	}
+}
+
+static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
+{
+	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
+}
+
+static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
+{
+	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
+}
+
+static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
+{
+	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
+}
+
+static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
+{
+	return axi_chan_ioread32(chan, CH_INTSTATUS);
+}
+
+static inline void axi_chan_disable(struct axi_dma_chan *chan)
+{
+	u32 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+	val &= ~((1 << chan->id) << DMAC_CHAN_EN_SHIFT);
+	val |=  ((1 << chan->id) << DMAC_CHAN_EN_WE_SHIFT);
+	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+}
+
+static inline void axi_chan_enable(struct axi_dma_chan *chan)
+{
+	u32 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+	val |= ((1 << chan->id) << DMAC_CHAN_EN_SHIFT |
+		(1 << chan->id) << DMAC_CHAN_EN_WE_SHIFT);
+	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+}
+
+static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
+{
+	u32 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+
+	return !!(val & ((1 << chan->id) << DMAC_CHAN_EN_SHIFT));
+}
+
+static inline bool axi_dma_is_sw_enable(struct axi_dma_chip *chip)
+{
+	struct dw_axi_dma *dw = chip->dw;
+	u32 i;
+
+	for (i = 0; i < dw->hdata->nr_channels; i++) {
+		if (dw->chan[i].in_use)
+			return true;
+	}
+
+	return false;
+}
+
+static void axi_dma_hw_init(struct axi_dma_chip *chip)
+{
+	u32 i;
+
+	axi_dma_disable(chip);
+
+	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
+		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
+		axi_chan_disable(&chip->dw->chan[i]);
+	}
+
+	axi_dma_irq_enable(chip);
+}
+
+static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
+				   dma_addr_t dst, size_t len)
+{
+	u32 width;
+	dma_addr_t sdl = (src | dst | len);
+	u32 max_width = chan->chip->dw->hdata->m_data_width;
+
+	width = sdl ? __ffs(sdl) : DWAXIDMAC_TRANS_WIDTH_MAX;
+
+	return width <= max_width ? width : max_width;
+}
+
+static inline const char *axi_chan_name(struct axi_dma_chan *chan)
+{
+	return dma_chan_name(&chan->chan);
+}
+
+static dma_cookie_t axi_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct axi_dma_desc *desc = to_axi_desc(tx);
+	struct axi_dma_chan *chan = to_axi_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* add desc from tx_descriptor to channel queue */
+	list_add_tail(&desc->desc_node, &chan->queue);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dev_dbg(dchan2dev(tx->chan), "%s: descriptor %u queued\n",
+		axi_chan_name(chan), cookie);
+
+	return cookie;
+}
+
+static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
+{
+	struct dw_axi_dma *dw = chan->chip->dw;
+	struct axi_dma_desc *desc;
+	dma_addr_t phys;
+
+	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
+	if (unlikely(!desc)) {
+		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
+			axi_chan_name(chan));
+		return NULL;
+	}
+
+	chan->descs_allocated++;
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
+	desc->txd.tx_submit = axi_tx_submit;
+	/* txd.flags can be overwritten in prep functions */
+	desc->txd.flags = DMA_CTRL_ACK;
+	desc->txd.phys = phys;
+
+	return desc;
+}
+
+static void axi_desc_put(struct axi_dma_chan *chan, struct axi_dma_desc *desc)
+{
+	struct dw_axi_dma *dw = chan->chip->dw;
+	struct axi_dma_desc *child, *_next;
+	unsigned int descs_put = 0;
+
+	if (unlikely(!desc))
+		return;
+
+	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
+		list_del(&child->desc_node);
+		dma_pool_free(dw->desc_pool, child, child->txd.phys);
+		descs_put++;
+	}
+
+	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+	descs_put++;
+
+	chan->descs_allocated -= descs_put;
+
+	dev_dbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
+		axi_chan_name(chan), descs_put, chan->descs_allocated);
+}
+
+static void
+axi_chan_descriptor_complete(struct axi_dma_chan *chan,
+			     struct axi_dma_desc *desc, bool callback_required)
+{
+	struct dma_async_tx_descriptor *txd = &desc->txd;
+	struct axi_dma_desc *child;
+	struct dmaengine_desc_callback cb;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: descriptor %u complete\n",
+		axi_chan_name(chan), txd->cookie);
+
+	spin_lock_irqsave(&chan->lock, flags);
+	dma_cookie_complete(txd);
+	if (callback_required)
+		dmaengine_desc_get_callback(txd, &cb);
+
+	/* async_tx_ack */
+	list_for_each_entry(child, &desc->tx_list, desc_node)
+		async_tx_ack(&child->txd);
+	async_tx_ack(&desc->txd);
+	axi_desc_put(chan, desc);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	if (callback_required)
+		dmaengine_desc_callback_invoke(&cb, NULL);
+}
+
+static enum dma_status
+dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+		  struct dma_tx_state *txstate)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	enum dma_status ret;
+
+	/* TODO: implement DMA_ERROR status management */
+	ret = dma_cookie_status(dchan, cookie, txstate);
+
+	if (chan->is_paused && ret == DMA_IN_PROGRESS)
+		return DMA_PAUSED;
+
+	return ret;
+}
+
+/* TODO: add 64 bit address support */
+static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+	desc->lli.llp_lo = cpu_to_le32(adr);
+}
+
+/* TODO: add 64 bit address support */
+static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
+{
+	axi_chan_iowrite32(chan, CH_LLP, adr);
+}
+
+/* called in chan locked context */
+static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
+				      struct axi_dma_desc *first)
+{
+	u32 reg, irq_mask;
+	u8 lms = 0; /* TODO: REVISIT: hardcode LLI master to AXI0 (should we?)*/
+	u32 priority = chan->chip->dw->hdata->priority[chan->id];
+
+	if (unlikely(axi_chan_is_hw_enable(chan))) {
+		dev_err(chan2dev(chan), "%s is non-idle!\n",
+			axi_chan_name(chan));
+
+		/* The tasklet will hopefully advance the queue... */
+		return;
+	}
+
+	axi_dma_enable(chan->chip);
+
+	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
+	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
+	axi_chan_iowrite32(chan, CH_CFG_L, reg);
+
+	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
+	       priority << CH_CFG_H_PRIORITY_POS |
+	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
+	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+	axi_chan_iowrite32(chan, CH_CFG_H, reg);
+
+	write_chan_llp(chan, first->txd.phys | lms);
+
+	/* TODO: enable llp irq errors */
+	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
+	axi_chan_irq_sig_set(chan, irq_mask);
+
+	/* generate 'suspend' status but don't generate interrupt */
+	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
+	axi_chan_irq_set(chan, irq_mask);
+
+	axi_chan_enable(chan);
+}
+
+/* called in chan locked context */
+static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
+{
+	struct axi_dma_desc *desc;
+
+	if (list_empty(&chan->queue))
+		return;
+
+	list_move(chan->queue.next, &chan->active_list);
+	desc = list_to_axi_desc(chan->active_list.next);
+	dev_dbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
+		desc->txd.cookie);
+	axi_chan_block_xfer_start(chan, desc);
+}
+
+static void dma_chan_issue_pending(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	unsigned long flags;
+
+	dev_dbg(dchan2dev(dchan), "%s: %s\n", __func__, axi_chan_name(chan));
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (list_empty(&chan->active_list))
+		axi_chan_start_first_queued(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+
+	/* ASSERT: channel is idle */
+	if (axi_chan_is_hw_enable(chan)) {
+		dev_err(chan2dev(chan), "%s is non-idle!\n",
+			axi_chan_name(chan));
+		return -EBUSY;
+	}
+
+	dev_dbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
+
+	dma_cookie_init(dchan);
+
+	chan->in_use = true;
+
+	return 0;
+}
+
+static void dma_chan_free_chan_resources(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+
+	dev_dbg(dchan2dev(dchan), "%s: %s: descriptor still allocated: %u\n",
+		__func__, axi_chan_name(chan), chan->descs_allocated);
+
+	/* ASSERT: channel lists are empty */
+	if (!list_empty(&chan->active_list) || !list_empty(&chan->queue))
+		dev_err(dchan2dev(dchan), "%s: lists are non-empty!\n",
+			axi_chan_name(chan));
+
+	/* ASSERT: channel is idle */
+	if (axi_chan_is_hw_enable(chan))
+		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
+			axi_chan_name(chan));
+
+	axi_chan_disable(chan);
+	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
+
+	chan->in_use = false;
+
+	/* Disable controller in case it was a last user */
+	if (!axi_dma_is_sw_enable(chan->chip))
+		axi_dma_disable(chan->chip);
+}
+
+/*
+ * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
+ * as 1, it understands that the current block is the final block in the
+ * transfer and completes the DMA transfer operation at the end of current
+ * block transfer.
+ */
+static void set_desc_last(struct axi_dma_desc *desc)
+{
+	u32 val = le32_to_cpu(desc->lli.ctl_hi);
+	val |= CH_CTL_H_LLI_LAST;
+	desc->lli.ctl_hi = cpu_to_le32(val);
+}
+
+/* TODO: add 64 bit address support */
+static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+	desc->lli.sar_lo = cpu_to_le32(adr);
+}
+
+/* TODO: add 64 bit address support */
+static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+	desc->lli.dar_lo = cpu_to_le32(adr);
+}
+
+/* TODO: REVISIT: how we should choose AXI master for mem-to-mem transfer? */
+static void set_desc_src_master(struct axi_dma_desc *desc)
+{
+	u32 val;
+
+	/* select AXI0 for source master */
+	val = le32_to_cpu(desc->lli.ctl_lo);
+	val &= ~CH_CTL_L_SRC_MAST;
+	desc->lli.ctl_lo = cpu_to_le32(val);
+}
+
+/* TODO: REVISIT: how we should choose AXI master for mem-to-mem transfer? */
+static void set_desc_dest_master(struct axi_dma_desc *desc)
+{
+	u32 val;
+
+	/* Select AXI1 for destination master if available */
+	val = le32_to_cpu(desc->lli.ctl_lo);
+	if (to_axi_dma_chan(desc->txd.chan)->chip->dw->hdata->nr_masters > 1)
+		val |= CH_CTL_L_DST_MAST;
+	else
+		val &= ~CH_CTL_L_DST_MAST;
+
+	desc->lli.ctl_lo = cpu_to_le32(val);
+}
+
+static struct dma_async_tx_descriptor *
+dma_chan_prep_dma_sg(struct dma_chan *dchan,
+		     struct scatterlist *dst_sg, unsigned int dst_nents,
+		     struct scatterlist *src_sg, unsigned int src_nents,
+		     unsigned long flags)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
+	size_t dst_len = 0, src_len = 0, xfer_len = 0, total_len = 0;
+	dma_addr_t dst_adr = 0, src_adr = 0;
+	u32 src_width, dst_width;
+	size_t block_ts, max_block_ts;
+	u32 reg;
+	u8 lms = 0; /* TODO: REVISIT: hardcode LLI master to AXI0 (should we?)*/
+
+	dev_dbg(chan2dev(chan), "%s: %s: sn: %d dn: %d flags: 0x%lx",
+		__func__, axi_chan_name(chan), src_nents, dst_nents, flags);
+
+	if (unlikely(dst_nents == 0 || src_nents == 0))
+		return NULL;
+
+	if (unlikely(dst_sg == NULL || src_sg == NULL))
+		return NULL;
+
+	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+	/*
+	 * Loop until there is either no more source or no more destination
+	 * scatterlist entry.
+	 */
+	while (true) {
+		/* process dest sg list */
+		if (dst_len == 0) {
+			/* no more destination scatterlist entries */
+			if (!dst_sg || !dst_nents)
+				break;
+
+			dst_adr = sg_dma_address(dst_sg);
+			dst_len = sg_dma_len(dst_sg);
+
+			dst_sg = sg_next(dst_sg);
+			dst_nents--;
+		}
+
+		/* process src sg list */
+		if (src_len == 0) {
+			/* no more source scatterlist entries */
+			if (!src_sg || !src_nents)
+				break;
+
+			src_adr = sg_dma_address(src_sg);
+			src_len = sg_dma_len(src_sg);
+
+			src_sg = sg_next(src_sg);
+			src_nents--;
+		}
+
+		/* min of src and dest length will be this xfer length */
+		xfer_len = min_t(size_t, src_len, dst_len);
+		if (xfer_len == 0)
+			continue;
+
+		/* take care for the alignment */
+		src_width = axi_chan_get_xfer_width(chan, src_adr,
+						    dst_adr, xfer_len);
+		/*
+		 * Actually src_width and dst_width can be different, but make
+		 * them same to be simpler.
+		 * TODO: REVISIT: Can we optimize it?
+		 */
+		dst_width = src_width;
+
+		/*
+		 * block_ts indicates the total number of data of width
+		 * src_width to be transferred in a DMA block transfer.
+		 * BLOCK_TS register should be set to block_ts -1
+		 */
+		block_ts = xfer_len >> src_width;
+		if (block_ts > max_block_ts) {
+			block_ts = max_block_ts;
+			xfer_len = max_block_ts << src_width;
+		}
+
+		desc = axi_desc_get(chan);
+		if (unlikely(!desc))
+			goto err_desc_get;
+
+		write_desc_sar(desc, src_adr);
+		write_desc_dar(desc, dst_adr);
+		desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
+		desc->lli.ctl_hi = cpu_to_le32(CH_CTL_H_LLI_VALID);
+
+		/* TODO: REVISIT: MSIZE - not sure what it is about */
+		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
+		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
+		       dst_width << CH_CTL_L_DST_WIDTH_POS |
+		       src_width << CH_CTL_L_SRC_WIDTH_POS |
+		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
+		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
+		desc->lli.ctl_lo = cpu_to_le32(reg);
+
+		set_desc_src_master(desc);
+		set_desc_dest_master(desc);
+
+		/* manage lists */
+		if (!first) {
+			first = desc;
+		} else {
+			list_add_tail(&desc->desc_node, &first->tx_list);
+			write_desc_llp(prev, desc->txd.phys | lms);
+		}
+		prev = desc;
+
+		/* update the lengths and addresses for the next loop cycle */
+		dst_len -= xfer_len;
+		src_len -= xfer_len;
+		dst_adr += xfer_len;
+		src_adr += xfer_len;
+
+		total_len += xfer_len;
+	}
+
+	/* total len of src/dest sg == 0, so no descriptors were allocated */
+	if (unlikely(!first))
+		return NULL;
+
+	/* First descriptor of the chain embeds additional information */
+	first->total_len = total_len;
+	first->txd.flags = flags;
+
+	/* set end-of-link to the last link descriptor of list */
+	set_desc_last(desc);
+
+	return &first->txd;
+
+err_desc_get:
+	axi_desc_put(chan, first);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest,
+			 dma_addr_t src, size_t len, unsigned long flags)
+{
+	unsigned int nents = 1;
+	struct scatterlist dst_sg;
+	struct scatterlist src_sg;
+
+	sg_init_table(&dst_sg, nents);
+	sg_init_table(&src_sg, nents);
+
+	sg_dma_address(&dst_sg) = dest;
+	sg_dma_address(&src_sg) = src;
+
+	sg_dma_len(&dst_sg) = len;
+	sg_dma_len(&src_sg) = len;
+
+	/* implement memcpy transfer as sg transfer with single list */
+	return dma_chan_prep_dma_sg(dchan, &dst_sg, nents,
+				    &src_sg, nents, flags);
+}
+
+static irqreturn_t dw_axi_dma_intretupt(int irq, void *dev_id)
+{
+	struct axi_dma_chip *chip = dev_id;
+
+	/* TODO: check for irq source (it must be from an enabled channel) */
+
+	/* disable DMAC interrupts. We'll enable them in the tasklet */
+	axi_dma_irq_disable(chip);
+
+	tasklet_schedule(&chip->dw->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void axi_chan_dump_lli(struct axi_dma_chan *chan,
+			      struct axi_dma_desc *desc)
+{
+	dev_err(dchan2dev(&chan->chan),
+		"SAR: 0x%x DAR: 0x%x LLP: 0x%x BTS 0x%x CTL: 0x%x:%08x",
+		le32_to_cpu(desc->lli.sar_lo),
+		le32_to_cpu(desc->lli.dar_lo),
+		le32_to_cpu(desc->lli.llp_lo),
+		le32_to_cpu(desc->lli.block_ts_lo),
+		le32_to_cpu(desc->lli.ctl_hi),
+		le32_to_cpu(desc->lli.ctl_lo));
+}
+
+static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
+				   struct axi_dma_desc *desc_head)
+{
+	struct axi_dma_desc *desc;
+
+	axi_chan_dump_lli(chan, desc_head);
+	list_for_each_entry(desc, &desc_head->tx_list, desc_node)
+		axi_chan_dump_lli(chan, desc);
+}
+
+static void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
+{
+	struct axi_dma_desc *desc, *bad_desc, *_desc;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	axi_chan_disable(chan);
+
+	/* The bad descriptor currently is in the active list, so remove it */
+	list_splice_init(&chan->active_list, &list);
+
+	/* Try to restart the controller */
+	axi_chan_start_first_queued(chan);
+
+	/* WARN about bad descriptor */
+	bad_desc = list_to_axi_desc(list.next);
+	dev_err(chan2dev(chan),
+		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
+		axi_chan_name(chan), bad_desc->txd.cookie, status);
+	axi_chan_list_dump_lli(chan, bad_desc);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	/* Pretend the bad descriptor completed successfully */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		axi_chan_descriptor_complete(chan, desc, true);
+}
+
+static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
+{
+	struct axi_dma_desc *desc, *_desc;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	if (axi_chan_is_hw_enable(chan)) {
+		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
+			axi_chan_name(chan));
+		axi_chan_disable(chan);
+	}
+
+	/* submit queued descriptors before processing the completed ones */
+	list_splice_init(&chan->active_list, &list);
+	axi_chan_start_first_queued(chan);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		axi_chan_descriptor_complete(chan, desc, true);
+}
+
+static void axi_dma_tasklet(unsigned long data)
+{
+	struct axi_dma_chip *chip = (struct axi_dma_chip *)data;
+	struct axi_dma_chan *chan;
+	struct dw_axi_dma *dw = chip->dw;
+
+	u32 status, i;
+
+	/* poll, clear and process every channel interrupt status */
+	for (i = 0; i < dw->hdata->nr_channels; i++) {
+		chan = &dw->chan[i];
+		status = axi_chan_irq_read(chan);
+		axi_chan_irq_clear(chan, status);
+
+		dev_dbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
+			axi_chan_name(chan), i, status);
+
+		if (status & DWAXIDMAC_IRQ_ALL_ERR)
+			axi_chan_handle_err(chan, status);
+		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
+			axi_chan_block_xfer_complete(chan);
+	}
+
+	/* re-enable interrupts */
+	axi_dma_irq_enable(chip);
+}
+
+static int dma_chan_terminate_all(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	struct axi_dma_desc *desc, *_desc;
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	axi_chan_disable(chan);
+
+	/* Remove all descriptors from active list and queue */
+	list_splice_init(&chan->queue, &list);
+	list_splice_init(&chan->active_list, &list);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dev_dbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
+
+	/* Flush all pending and queued descriptors */
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		axi_chan_descriptor_complete(chan, desc, false);
+
+	return 0;
+}
+
+static int dma_chan_pause(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	unsigned long flags;
+	unsigned int timeout = 20; /* timeout iterations */
+	int ret = -EAGAIN;
+	u32 val;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* TODO: check for SUSP, LOCK bit (probably chan is already disabled) */
+
+	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+	val |= ((1 << chan->id) << DMAC_CHAN_SUSP_SHIFT |
+		(1 << chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+
+	while (timeout--) {
+		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED) {
+			axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
+			ret = 0;
+			break;
+		}
+		udelay(2);
+	}
+
+	chan->is_paused = true;
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return ret;
+}
+
+/* called in chan locked context */
+static inline void axi_chan_resume(struct axi_dma_chan *chan)
+{
+	u32 val;
+
+	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+	val &= ~((1 << chan->id) << DMAC_CHAN_SUSP_SHIFT);
+	val |=  ((1 << chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+
+	chan->is_paused = false;
+}
+
+static int dma_chan_resume(struct dma_chan *dchan)
+{
+	struct axi_dma_chan *chan = to_axi_dma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	if (chan->is_paused)
+		axi_chan_resume(chan);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+static int parse_device_properties(struct axi_dma_chip *chip)
+{
+	struct device *dev = chip->dev;
+	u32 tmp, carr[DMAC_MAX_CHANNELS];
+	int ret;
+
+	ret = device_property_read_u32(dev, "dma-channels", &tmp);
+	if (ret)
+		return ret;
+	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
+		return -EINVAL;
+
+	chip->dw->hdata->nr_channels = tmp;
+
+	ret = device_property_read_u32(dev, "dma-masters", &tmp);
+	if (ret)
+		return ret;
+	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
+		return -EINVAL;
+
+	chip->dw->hdata->nr_masters = tmp;
+
+	ret = device_property_read_u32(dev, "data-width", &tmp);
+	if (ret)
+		return ret;
+	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
+		return -EINVAL;
+
+	chip->dw->hdata->m_data_width = tmp;
+
+	ret = device_property_read_u32_array(dev, "block-size", carr,
+					     chip->dw->hdata->nr_channels);
+	if (ret)
+		return ret;
+	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++)
+		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
+			return -EINVAL;
+		else
+			chip->dw->hdata->block_size[tmp] = carr[tmp];
+
+	ret = device_property_read_u32_array(dev, "priority", carr,
+					     chip->dw->hdata->nr_channels);
+	if (ret)
+		return ret;
+	/* priority value must be programmed within [0:nr_channels-1] range */
+	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++)
+		if (carr[tmp] >= chip->dw->hdata->nr_channels)
+			return -EINVAL;
+		else
+			chip->dw->hdata->priority[tmp] = carr[tmp];
+
+	return 0;
+}
+
+static int dw_probe(struct platform_device *pdev)
+{
+	struct axi_dma_chip *chip;
+	struct resource *mem;
+	struct dw_axi_dma *dw;
+	struct dw_axi_dma_hcfg *hdata;
+	u32 i;
+	int ret;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
+	if (!hdata)
+		return -ENOMEM;
+
+	chip->dw = dw;
+	chip->dev = &pdev->dev;
+	chip->dw->hdata = hdata;
+
+	chip->irq = platform_get_irq(pdev, 0);
+	if (chip->irq < 0)
+		return chip->irq;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->regs = devm_ioremap_resource(chip->dev, mem);
+	if (IS_ERR(chip->regs))
+		return PTR_ERR(chip->regs);
+
+	ret = dma_coerce_mask_and_coherent(chip->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	chip->clk = devm_clk_get(chip->dev, NULL);
+	if (IS_ERR(chip->clk))
+		return PTR_ERR(chip->clk);
+
+	ret = parse_device_properties(chip);
+	if (ret)
+		return ret;
+
+	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
+				sizeof(*dw->chan), GFP_KERNEL);
+	if (!dw->chan)
+		return -ENOMEM;
+
+	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_intretupt,
+			       IRQF_SHARED, DRV_NAME, chip);
+	if (ret)
+		return ret;
+
+	/* lli address must be aligned to a 64-byte boundary */
+	dw->desc_pool = dmam_pool_create(DRV_NAME, chip->dev,
+					 sizeof(struct axi_dma_desc), 64, 0);
+	if (!dw->desc_pool) {
+		dev_err(chip->dev, "No memory for descriptors dma pool\n");
+		return -ENOMEM;
+	}
+
+	tasklet_init(&dw->tasklet, axi_dma_tasklet, (unsigned long)chip);
+
+	INIT_LIST_HEAD(&dw->dma.channels);
+	for (i = 0; i < hdata->nr_channels; i++) {
+		struct axi_dma_chan *chan = &dw->chan[i];
+
+		chan->chip = chip;
+		chan->chan.device = &dw->dma;
+		dma_cookie_init(&chan->chan);
+		list_add_tail(&chan->chan.device_node, &dw->dma.channels);
+
+		chan->id = (u8)i;
+		chan->priority = (u8)i;
+		chan->direction = DMA_TRANS_NONE;
+
+		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
+
+		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->active_list);
+		INIT_LIST_HEAD(&chan->queue);
+	}
+
+	axi_dma_hw_init(chip);
+
+	/* Set capabilities */
+	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+	dma_cap_set(DMA_SG, dw->dma.cap_mask);
+
+	/* DMA capabilities */
+	dw->dma.chancnt = hdata->nr_channels;
+	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
+	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
+	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
+	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	dw->dma.dev = chip->dev;
+	dw->dma.device_tx_status = dma_chan_tx_status;
+	dw->dma.device_issue_pending = dma_chan_issue_pending;
+	dw->dma.device_terminate_all = dma_chan_terminate_all;
+	dw->dma.device_pause = dma_chan_pause;
+	dw->dma.device_resume = dma_chan_resume;
+
+	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
+	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
+
+	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
+	dw->dma.device_prep_dma_sg = dma_chan_prep_dma_sg;
+
+	ret = clk_prepare_enable(chip->clk);
+	if (ret < 0)
+		return ret;
+
+	ret = dma_async_device_register(&dw->dma);
+	if (ret)
+		goto err_clk_disable;
+
+	platform_set_drvdata(pdev, chip);
+
+	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
+		 dw->hdata->nr_channels);
+
+	return 0;
+
+err_clk_disable:
+	clk_disable_unprepare(chip->clk);
+
+	return ret;
+}
+
+static int dw_remove(struct platform_device *pdev)
+{
+	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
+	struct dw_axi_dma *dw = chip->dw;
+	struct axi_dma_chan *chan, *_chan;
+	u32 i;
+
+	axi_dma_irq_disable(chip);
+	for (i = 0; i < dw->hdata->nr_channels; i++) {
+		axi_chan_disable(&chip->dw->chan[i]);
+		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
+	}
+	axi_dma_disable(chip);
+
+	tasklet_kill(&dw->tasklet);
+
+	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
+			chan.device_node) {
+		list_del(&chan->chan.device_node);
+	}
+
+	clk_disable_unprepare(chip->clk);
+
+	return 0;
+}
+
+static const struct of_device_id dw_dma_of_id_table[] = {
+	{ .compatible = "snps,axi-dma" },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+
+static struct platform_driver dw_driver = {
+	.probe		= dw_probe,
+	.remove		= dw_remove,
+	.driver = {
+		.name	= DRV_NAME,
+		.of_match_table = of_match_ptr(dw_dma_of_id_table),
+	},
+};
+
+static int __init dw_init(void)
+{
+	return platform_driver_register(&dw_driver);
+}
+subsys_initcall(dw_init);
+
+static void __exit dw_exit(void)
+{
+	platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
+MODULE_AUTHOR("Paltsev Eugeniy <Eugeniy.Paltsev@synopsys.com>");
diff --git a/drivers/dma/axi_dma_platform.h b/drivers/dma/axi_dma_platform.h
new file mode 100644
index 0000000..307c8db
--- /dev/null
+++ b/drivers/dma/axi_dma_platform.h
@@ -0,0 +1,128 @@
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _AXI_DMA_PLATFORM_H
+#define _AXI_DMA_PLATFORM_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+
+#define DMAC_MAX_CHANNELS	8
+#define DMAC_MAX_MASTERS	2
+#define DMAC_MAX_BLK_SIZE	0x200000
+
+struct dw_axi_dma_hcfg {
+	u32	nr_channels;
+	u32	nr_masters;
+	u32	m_data_width;
+	u32	block_size[DMAC_MAX_CHANNELS];
+	u32	priority[DMAC_MAX_CHANNELS];
+};
+
+struct axi_dma_chan {
+	struct dma_chan			chan;
+	struct axi_dma_chip		*chip;
+	void __iomem			*chan_regs;
+	u8				id;
+	u8				priority;
+	enum dma_transfer_direction	direction;
+
+	spinlock_t			lock;
+	bool				in_use;
+
+	/* these other elements are all protected by lock */
+	unsigned long			flags;
+	struct list_head		active_list;
+	struct list_head		queue;
+	bool				is_paused;
+
+	unsigned int			descs_allocated;
+};
+
+struct dw_axi_dma {
+	struct dma_device	dma;
+	struct dw_axi_dma_hcfg	*hdata;
+	struct dma_pool		*desc_pool;
+	struct tasklet_struct	tasklet;
+
+	/* channels */
+	struct axi_dma_chan	*chan;
+};
+
+struct axi_dma_chip {
+	struct device		*dev;
+	int			irq;
+	void __iomem		*regs;
+	struct clk		*clk;
+	struct dw_axi_dma	*dw;
+};
+
+/* NOTE: DW AXI DMAC theoretically can be configured with BE IO */
+/* LLI == Linked List Item */
+struct axi_dma_lli {
+	__le32		sar_lo;
+	__le32		sar_hi;
+	__le32		dar_lo;
+	__le32		dar_hi;
+	__le32		block_ts_lo;
+	__le32		block_ts_hi;
+	__le32		llp_lo;
+	__le32		llp_hi;
+	__le32		ctl_lo;
+	__le32		ctl_hi;
+	__le32		sstat;
+	__le32		dstat;
+	__le32		status_lo;
+	__le32		status_hi;
+	__le32		reserved_lo;
+	__le32		reserved_hi;
+};
+
+struct axi_dma_desc {
+	struct axi_dma_lli		lli;
+
+	struct dma_async_tx_descriptor	txd;
+	struct list_head		desc_node;
+	struct list_head		tx_list;
+	size_t				total_len;
+};
+
+#define list_to_axi_desc(h) list_entry(h, struct axi_dma_desc, desc_node)
+
+static inline struct axi_dma_chan *to_axi_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct axi_dma_chan, chan);
+}
+
+static inline struct axi_dma_desc *
+to_axi_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct axi_dma_desc, txd);
+}
+
+static inline struct device *dchan2dev(struct dma_chan *dchan)
+{
+	return &dchan->dev->device;
+}
+
+static inline struct device *chan2dev(struct axi_dma_chan *chan)
+{
+	return &chan->chan.dev->device;
+}
+
+#endif /* _AXI_DMA_PLATFORM_H */
diff --git a/drivers/dma/axi_dma_platform_reg.h b/drivers/dma/axi_dma_platform_reg.h
new file mode 100644
index 0000000..4d62b50
--- /dev/null
+++ b/drivers/dma/axi_dma_platform_reg.h
@@ -0,0 +1,189 @@
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _AXI_DMA_PLATFORM_REG_H
+#define _AXI_DMA_PLATFORM_REG_H
+
+#include <linux/bitops.h>
+
+#define COMMON_REG_LEN		0x100
+#define CHAN_REG_LEN		0x100
+
+/* Common registers offset */
+#define DMAC_ID			0x000 // R DMAC ID
+#define DMAC_COMPVER		0x008 // R DMAC Component Version
+#define DMAC_CFG		0x010 // R/W DMAC Configuration
+#define DMAC_CHEN		0x018 // R/W DMAC Channel Enable
+#define DMAC_CHEN_L		0x018 // R/W DMAC Channel Enable 00-31
+#define DMAC_CHEN_H		0x01C // R/W DMAC Channel Enable 32-63
+#define DMAC_INTSTATUS		0x030 // R DMAC Interrupt Status
+#define DMAC_COMMON_INTCLEAR	0x038 // W DMAC Interrupt Clear
+#define DMAC_COMMON_INTSTATUS_ENA 0x040 // R DMAC Interrupt Status Enable
+#define DMAC_COMMON_INTSIGNAL_ENA 0x048 // R/W DMAC Interrupt Signal Enable
+#define DMAC_COMMON_INTSTATUS	0x050 // R DMAC Interrupt Status
+#define DMAC_RESET		0x058 // R DMAC Reset Register1
+
+/* DMA channel registers offset */
+#define CH_SAR			0x000 // R/W Chan Source Address
+#define CH_DAR			0x008 // R/W Chan Destination Address
+#define CH_BLOCK_TS		0x010 // R/W Chan Block Transfer Size
+#define CH_CTL			0x018 // R/W Chan Control
+#define CH_CTL_L		0x018 // R/W Chan Control 00-31
+#define CH_CTL_H		0x01C // R/W Chan Control 32-63
+#define CH_CFG			0x020 // R/W Chan Configuration
+#define CH_CFG_L		0x020 // R/W Chan Configuration 00-31
+#define CH_CFG_H		0x024 // R/W Chan Configuration 32-63
+#define CH_LLP			0x028 // R/W Chan Linked List Pointer
+#define CH_STATUS		0x030 // R Chan Status
+#define CH_SWHSSRC		0x038 // R/W Chan SW Handshake Source
+#define CH_SWHSDST		0x040 // R/W Chan SW Handshake Destination
+#define CH_BLK_TFR_RESUMEREQ	0x048 // W Chan Block Transfer Resume Req
+#define CH_AXI_ID		0x050 // R/W Chan AXI ID
+#define CH_AXI_QOS		0x058 // R/W Chan AXI QOS
+#define CH_SSTAT		0x060 // R Chan Source Status
+#define CH_DSTAT		0x068 // R Chan Destination Status
+#define CH_SSTATAR		0x070 // R/W Chan Source Status Fetch Addr
+#define CH_DSTATAR		0x078 // R/W Chan Destination Status Fetch Addr
+#define CH_INTSTATUS_ENA	0x080 // R/W Chan Interrupt Status Enable
+#define CH_INTSTATUS		0x088 // R/W Chan Interrupt Status
+#define CH_INTSIGNAL_ENA	0x090 // R/W Chan Interrupt Signal Enable
+#define CH_INTCLEAR		0x098 // W Chan Interrupt Clear
+
+
+/* DMAC_CFG */
+#define DMAC_EN_MASK		0x00000001U
+#define DMAC_EN_POS		0
+
+#define INT_EN_MASK		0x00000002U
+#define INT_EN_POS		1
+
+#define DMAC_CHAN_EN_SHIFT	0
+#define DMAC_CHAN_EN_WE_SHIFT	8
+
+#define DMAC_CHAN_SUSP_SHIFT	16
+#define DMAC_CHAN_SUSP_WE_SHIFT	24
+
+/* CH_CTL_H */
+#define CH_CTL_H_LLI_LAST	BIT(30)
+#define CH_CTL_H_LLI_VALID	BIT(31)
+
+/* CH_CTL_L */
+#define CH_CTL_L_LAST_WRITE_EN	BIT(30)
+
+#define CH_CTL_L_DST_MSIZE_POS	18
+#define CH_CTL_L_SRC_MSIZE_POS	14
+enum {
+	DWAXIDMAC_BURST_TRANS_LEN_1	= 0x0,
+	DWAXIDMAC_BURST_TRANS_LEN_4,
+	DWAXIDMAC_BURST_TRANS_LEN_8,
+	DWAXIDMAC_BURST_TRANS_LEN_16,
+	DWAXIDMAC_BURST_TRANS_LEN_32,
+	DWAXIDMAC_BURST_TRANS_LEN_64,
+	DWAXIDMAC_BURST_TRANS_LEN_128,
+	DWAXIDMAC_BURST_TRANS_LEN_256,
+	DWAXIDMAC_BURST_TRANS_LEN_512,
+	DWAXIDMAC_BURST_TRANS_LEN_1024
+};
+
+#define CH_CTL_L_DST_WIDTH_POS	11
+#define CH_CTL_L_SRC_WIDTH_POS	8
+
+#define CH_CTL_L_DST_INC_POS	6
+#define CH_CTL_L_SRC_INC_POS	4
+enum {
+	DWAXIDMAC_CH_CTL_L_INC	= 0x0,
+	DWAXIDMAC_CH_CTL_L_NOINC
+};
+
+#define CH_CTL_L_DST_MAST_POS	2
+#define CH_CTL_L_DST_MAST	BIT(CH_CTL_L_DST_MAST_POS)
+#define CH_CTL_L_SRC_MAST_POS	0
+#define CH_CTL_L_SRC_MAST	BIT(CH_CTL_L_SRC_MAST_POS)
+
+/* CH_CFG_H */
+#define CH_CFG_H_PRIORITY_POS	17
+#define CH_CFG_H_HS_SEL_DST_POS	4
+#define CH_CFG_H_HS_SEL_SRC_POS	3
+enum {
+	DWAXIDMAC_HS_SEL_HW	= 0x0,
+	DWAXIDMAC_HS_SEL_SW
+};
+
+#define CH_CFG_H_TT_FC_POS	0
+enum {
+	DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC	= 0x0,
+	DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC,
+	DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC,
+	DWAXIDMAC_TT_FC_PER_TO_PER_DMAC,
+	DWAXIDMAC_TT_FC_PER_TO_MEM_SRC,
+	DWAXIDMAC_TT_FC_PER_TO_PER_SRC,
+	DWAXIDMAC_TT_FC_MEM_TO_PER_DST,
+	DWAXIDMAC_TT_FC_PER_TO_PER_DST
+};
+
+/* CH_CFG_L */
+#define CH_CFG_L_DST_MULTBLK_TYPE_POS	2
+#define CH_CFG_L_SRC_MULTBLK_TYPE_POS	0
+enum {
+	DWAXIDMAC_MBLK_TYPE_CONTIGUOUS	= 0x0,
+	DWAXIDMAC_MBLK_TYPE_RELOAD,
+	DWAXIDMAC_MBLK_TYPE_SHADOW_REG,
+	DWAXIDMAC_MBLK_TYPE_LL
+};
+
+enum {
+	DWAXIDMAC_IRQ_NONE		= 0x0,
+	DWAXIDMAC_IRQ_BLOCK_TRF		= BIT(0),  // block transfer complete
+	DWAXIDMAC_IRQ_DMA_TRF		= BIT(1),  // dma transfer complete
+	DWAXIDMAC_IRQ_SRC_TRAN		= BIT(3),  // source transaction complete
+	DWAXIDMAC_IRQ_DST_TRAN		= BIT(4),  // destination transaction complete
+	DWAXIDMAC_IRQ_SRC_DEC_ERR	= BIT(5),  // source decode error
+	DWAXIDMAC_IRQ_DST_DEC_ERR	= BIT(6),  // destination decode error
+	DWAXIDMAC_IRQ_SRC_SLV_ERR	= BIT(7),  // source slave error
+	DWAXIDMAC_IRQ_DST_SLV_ERR	= BIT(8),  // destination slave error
+	DWAXIDMAC_IRQ_LLI_RD_DEC_ERR	= BIT(9),  // LLI read decode error
+	DWAXIDMAC_IRQ_LLI_WR_DEC_ERR	= BIT(10), // LLI write decode error
+	DWAXIDMAC_IRQ_LLI_RD_SLV_ERR	= BIT(11), // LLI read slave error
+	DWAXIDMAC_IRQ_LLI_WR_SLV_ERR	= BIT(12), // LLI write slave error
+	DWAXIDMAC_IRQ_INVALID_ERR	= BIT(13), // LLI invalid error or Shadow register error
+	DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR	= BIT(14), // Slave Interface Multiblock type error
+	DWAXIDMAC_IRQ_DEC_ERR		= BIT(16), // Slave Interface decode error
+	DWAXIDMAC_IRQ_WR2RO_ERR		= BIT(17), // Slave Interface write to read only error
+	DWAXIDMAC_IRQ_RD2RWO_ERR	= BIT(18), // Slave Interface read to write only error
+	DWAXIDMAC_IRQ_WRONCHEN_ERR	= BIT(19), // Slave Interface write to channel error
+	DWAXIDMAC_IRQ_SHADOWREG_ERR	= BIT(20), // Slave Interface shadow reg error
+	DWAXIDMAC_IRQ_WRONHOLD_ERR	= BIT(21), // Slave Interface hold error
+	DWAXIDMAC_IRQ_LOCK_CLEARED	= BIT(27), // Lock Cleared Status
+	DWAXIDMAC_IRQ_SRC_SUSPENDED	= BIT(28), // Source Suspended Status
+	DWAXIDMAC_IRQ_SUSPENDED		= BIT(29), // Channel Suspended Status
+	DWAXIDMAC_IRQ_DISABLED		= BIT(30), // Channel Disabled Status
+	DWAXIDMAC_IRQ_ABORTED		= BIT(31), // Channel Aborted Status
+	DWAXIDMAC_IRQ_ALL_ERR		= 0x003F7FE0,
+	DWAXIDMAC_IRQ_ALL		= 0xFFFFFFFF
+};
+
+enum {
+	DWAXIDMAC_TRANS_WIDTH_8		= 0x0,
+	DWAXIDMAC_TRANS_WIDTH_16,
+	DWAXIDMAC_TRANS_WIDTH_32,
+	DWAXIDMAC_TRANS_WIDTH_64,
+	DWAXIDMAC_TRANS_WIDTH_128,
+	DWAXIDMAC_TRANS_WIDTH_256,
+	DWAXIDMAC_TRANS_WIDTH_512,
+	DWAXIDMAC_TRANS_WIDTH_MAX	= DWAXIDMAC_TRANS_WIDTH_512
+};
+
+#endif /* _AXI_DMA_PLATFORM_REG_H */
-- 
2.5.5


* Re: [RFC] dmaengine: Add DW AXI DMAC driver
  2017-01-20 10:50 [RFC] dmaengine: Add DW AXI DMAC driver Eugeniy Paltsev
@ 2017-01-20 13:38 ` Andy Shevchenko
  2017-01-20 18:21   ` Eugeniy Paltsev
  0 siblings, 1 reply; 4+ messages in thread
From: Andy Shevchenko @ 2017-01-20 13:38 UTC (permalink / raw)
  To: Eugeniy Paltsev, dmaengine
  Cc: linux-kernel, Vinod Koul, Dan Williams, Alexey Brodkin

On Fri, 2017-01-20 at 13:50 +0300, Eugeniy Paltsev wrote:
> This patch adds support for the DW AXI DMAC controller.
> 
> DW AXI DMAC is a part of upcoming development board from Synopsys.
> 
> In this driver implementation only DMA_MEMCPY and DMA_SG transfers
> are supported.
> 
> Note: there is no DT documentation in this patch yet, but it will
> be added in the nearest future.

First of all, please use virt-dma API.
Second, don't look to dw_dmac for examples, it's not a good one to be
learned from.

Some mostly style comments below.

> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/device.h>
> +#include <linux/bitops.h>
> +#include <linux/platform_device.h>
> +#include <linux/delay.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/dmapool.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/of.h>
> +#include <linux/of_dma.h>
> +#include <linux/err.h>

Alphabetical order?
Check if you need all of them.

If you are going to use this driver as a library, I would recommend doing
it as a library in the first place.


> +/*
> + * The set of bus widths supported by the DMA controller. DW AXI DMAC
> supports
> + * master data bus width up to 512 bits (for both AXI master
> interfaces), but
> + * it depends on IP block configurarion.
> + */
> +#define AXI_DMA_BUSWIDTHS		  \
> +	(DMA_SLAVE_BUSWIDTH_UNDEFINED	| \
> +	DMA_SLAVE_BUSWIDTH_1_BYTE	| \
> +	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
> +	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
> +	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
> +	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
> +	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
> +	DMA_SLAVE_BUSWIDTH_64_BYTES)
> +/* TODO: check: do we need to use BIT() macro here? */

Yes, and here is the problem of that API.
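
I would expect something like this (just a sketch; the capability fields
are bitmasks, so each supported width gets its own bit):

#define AXI_DMA_BUSWIDTHS		  \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)	| \
	BIT(DMA_SLAVE_BUSWIDTH_32_BYTES)	| \
	BIT(DMA_SLAVE_BUSWIDTH_64_BYTES))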

> +
> +/* NOTE: DW AXI DMAC theoretically can be configured with BE IO */

Do this as ops in your channel or chip struct.
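
For example, something along these lines (a rough sketch, the names are
made up):

struct axi_dma_io_ops {
	unsigned int (*read32)(void __iomem *addr);
	void (*write32)(u32 val, void __iomem *addr);
};

picked once at probe time (ioread32/iowrite32 vs. ioread32be/iowrite32be),
so the rest of the driver never has to care about the IO endianness.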

> +static inline void axi_dma_disable(struct axi_dma_chip *chip)
> +{
> +	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
> +	val &= ~DMAC_EN_MASK;
> +	axi_dma_iowrite32(chip, DMAC_CFG, val);

Better to use

u32 val;

val = read();
val &= y;
write(val);

pattern.

Same for similar places.
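
For example, axi_dma_disable() would then read:

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}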

> +static inline void axi_chan_irq_disable(struct axi_dma_chan *chan,
> u32 irq_mask)
> +{
> +	u32 val;
> +
> +	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {

I doubt likely() is useful here anyhow. Have you looked into assembly?
Does it indeed do what it's supposed to?

> +static inline void axi_chan_disable(struct axi_dma_chan *chan)
> +{
> +	u32 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
> +	val &= ~((1 << chan->id) << DMAC_CHAN_EN_SHIFT);
> +	val |=  ((1 << chan->id) << DMAC_CHAN_EN_WE_SHIFT);

You talked somewhere of a BIT macro, here it is one

val &= ~BIT(chan->id + DMAC_CHAN_EN_SHIFT);

or

val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);

whatever suits better.

Check all code for this.
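
E.g. (a sketch):

static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}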

> +static inline bool axi_dma_is_sw_enable(struct axi_dma_chip *chip)
> +{
> +	struct dw_axi_dma *dw = chip->dw;
> +	u32 i;
> +
> +	for (i = 0; i < dw->hdata->nr_channels; i++) {
> +		if (dw->chan[i].in_use)

Hmm... I know why we have such a flag in dw_dmac, but I doubt it's needed
in this driver. Just double-check the need for it.

> +			return true;
> +	}
> +
> +	return false;
> +}
> 

> +static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan,
> dma_addr_t src,
> +				   dma_addr_t dst, size_t len)
> +{

> +	u32 width;
> +	dma_addr_t sdl = (src | dst | len);
> +	u32 max_width = chan->chip->dw->hdata->m_data_width;

Use reverse tree.

dma_addr_t use above is wrong. (size_t might be bigger in some cases)

> +
> +	width = sdl ? __ffs(sdl) : DWAXIDMAC_TRANS_WIDTH_MAX;
> +
> +	return width <= max_width ? width : max_width;

min() / min_t()
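
I.e. something like:

	return min_t(u32, width, max_width);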

> +}

> +static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
> +{
> +	desc->lli.sar_lo = cpu_to_le32(adr);
> +}

Perhaps macros for all of them? Choose whatever looks and suits better.


> +/* TODO: REVISIT: how we should choose AXI master for mem-to-mem
> transfer? */
> +static void set_desc_dest_master(struct axi_dma_desc *desc)
> +{
> +	u32 val;
> +
> +	/* select AXI1 for source master if available*/

Fix indentation, capitalize it.

> +}

> +
> +static int dw_probe(struct platform_device *pdev)
> +{
> +	struct axi_dma_chip *chip;
> +	struct resource *mem;
> +	struct dw_axi_dma *dw;
> +	struct dw_axi_dma_hcfg *hdata;
> +	u32 i;
> +	int ret;
> +
> +	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
> +	if (!chip)
> +		return -ENOMEM;
> +
> +	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
> +	if (!dw)
> +		return -ENOMEM;
> +
> +	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
> +	if (!hdata)
> +		return -ENOMEM;
> +
> +	chip->dw = dw;
> +	chip->dev = &pdev->dev;
> +	chip->dw->hdata = hdata;
> +
> +	chip->irq = platform_get_irq(pdev, 0);
> +	if (chip->irq < 0)
> +		return chip->irq;
> +
> +	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	chip->regs = devm_ioremap_resource(chip->dev, mem);
> +	if (IS_ERR(chip->regs))
> +		return PTR_ERR(chip->regs);
> +

> +	ret = dma_coerce_mask_and_coherent(chip->dev,
> DMA_BIT_MASK(32));

It will not work for 64 bits, and it will not work for other users of this
driver, if any (when you have a different DMA mask to be set).
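
A more flexible variant could look like this (a sketch; whether it applies
as-is depends on how the platform code sets up the device's dma_mask):

	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
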

> +	if (ret)
> +		return ret;
> +
> +	chip->clk = devm_clk_get(chip->dev, NULL);
> +	if (IS_ERR(chip->clk))
> +		return PTR_ERR(chip->clk);
> +
> +	ret = parse_device_properties(chip);
> +	if (ret)
> +		return ret;
> +
> +	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
> +				sizeof(*dw->chan), GFP_KERNEL);
> +	if (!dw->chan)
> +		return -ENOMEM;
> +
> +	ret = devm_request_irq(chip->dev, chip->irq,
> dw_axi_dma_intretupt,
> +			       IRQF_SHARED, DRV_NAME, chip);
> +	if (ret)
> +		return ret;
> +
> 

> +
> +	INIT_LIST_HEAD(&dw->dma.channels);
> +	for (i = 0; i < hdata->nr_channels; i++) {
> +		struct axi_dma_chan *chan = &dw->chan[i];
> +
> +		chan->chip = chip;
> +		chan->chan.device = &dw->dma;
> +		dma_cookie_init(&chan->chan);
> +		list_add_tail(&chan->chan.device_node, &dw-
> >dma.channels);
> +
> 

> +		chan->id = (u8)i;

This duplicates what you have in struct dma_chan

> +		chan->priority = (u8)i;
> +		chan->direction = DMA_TRANS_NONE;
> +

> +
> +	dw->dma.device_alloc_chan_resources =
> dma_chan_alloc_chan_resources;
> +	dw->dma.device_free_chan_resources =
> dma_chan_free_chan_resources;
> +
> +	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
> +	dw->dma.device_prep_dma_sg = dma_chan_prep_dma_sg;
> +
> +	ret = clk_prepare_enable(chip->clk);
> +	if (ret < 0)
> +		return ret;
> +

> +	ret = dma_async_device_register(&dw->dma);

// offtopic
Perhaps someone can eventually introduce devm_ variant of this?
// offtopic

> +	if (ret)
> +		goto err_clk_disable;
> +

> +MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform
> driver");
> +MODULE_AUTHOR("Paltsev Eugeniy <Eugeniy.Paltsev@synopsys.com>");

FirstName LastName <email@address.com> ?


-- 
Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Intel Finland Oy


* Re: [RFC] dmaengine: Add DW AXI DMAC driver
  2017-01-20 13:38 ` Andy Shevchenko
@ 2017-01-20 18:21   ` Eugeniy Paltsev
  2017-01-20 20:48     ` Andy Shevchenko
  0 siblings, 1 reply; 4+ messages in thread
From: Eugeniy Paltsev @ 2017-01-20 18:21 UTC (permalink / raw)
  To: dmaengine, andriy.shevchenko
  Cc: dan.j.williams, linux-kernel, Eugeniy.Paltsev, Alexey.Brodkin,
	vinod.koul, linux-snps-arc

Hi Andy,
thanks for the response.

I agree with most of the comments.
My comments below.

On Fri, 2017-01-20 at 15:38 +0200, Andy Shevchenko wrote:
> On Fri, 2017-01-20 at 13:50 +0300, Eugeniy Paltsev wrote:
> > 
> > This patch adds support for the DW AXI DMAC controller.
> > 
> > DW AXI DMAC is a part of upcoming development board from Synopsys.
> > 
> > In this driver implementation only DMA_MEMCPY and DMA_SG transfers
> > are supported.
> > 
> > Note: there is no DT documentation in this patch yet, but it will
> > be added in the nearest future.
> First of all, please use virt-dma API.
Is it necessary?
I couldn't find any notes about virt-dma in the documentation.

> Second, don't look to dw_dmac for examples, it's not a good one to be
> learned from.
Any suggestions for a DMA driver to look at for examples?

> 
> Some mostly style comments below.
> 
> > 
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/device.h>
> > +#include <linux/bitops.h>
> > +#include <linux/platform_device.h>
> > +#include <linux/delay.h>
> > +#include <linux/dmaengine.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/dmapool.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/io.h>
> > +#include <linux/of.h>
> > +#include <linux/of_dma.h>
> > +#include <linux/err.h>
> Alphabetical order?
> Check if you need all of them.
> 
> If you are going to use this driver as a library I would recommend to
> do
> it as a library in the first place.
> 
> 
> > 
> > +/*
> > + * The set of bus widths supported by the DMA controller. DW AXI
> > DMAC
> > supports
> > + * master data bus width up to 512 bits (for both AXI master
> > interfaces), but
> > + * it depends on IP block configurarion.
> > + */
> > +#define AXI_DMA_BUSWIDTHS		  \
> > +	(DMA_SLAVE_BUSWIDTH_UNDEFINED	| \
> > +	DMA_SLAVE_BUSWIDTH_1_BYTE	| \
> > +	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
> > +	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
> > +	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
> > +	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
> > +	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
> > +	DMA_SLAVE_BUSWIDTH_64_BYTES)
> > +/* TODO: check: do we need to use BIT() macro here? */
> Yes, and here is the problem of that API.
> 
> > 
> > +
> > +/* NOTE: DW AXI DMAC theoretically can be configured with BE IO */
> Do this as ops in your channel or chip struct.
> 
> +static inline void axi_dma_disable(struct axi_dma_chip *chip)
> > 
> > +{
> > +	u32 val = axi_dma_ioread32(chip, DMAC_CFG);
> > +	val &= ~DMAC_EN_MASK;
> > +	axi_dma_iowrite32(chip, DMAC_CFG, val);
> Better to use
> 
> u32 val;
> 
> val = read();
> val &= y;
> write(val);
> 
> pattern.
> 
> Same for similar places.
Are you sure?
I saw the opposite advice, to use a construction like
------------->8---------------
u32 val = read();
val &= y;
write(val);
------------->8---------------
to reduce space.
> 
> > 
> > +static inline void axi_chan_irq_disable(struct axi_dma_chan *chan,
> > u32 irq_mask)
> > +{
> > +	u32 val;
> > +
> > +	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
> I doubt likely() is useful here anyhow. Have you looked into
> assembly?
> Does it indeed do what it's supposed to?
Yes, I looked into the assembly.
I used "likely()" because irq_mask will be equal to DWAXIDMAC_IRQ_ALL in
99.9% of the calls to this function.
It is useful here, am I wrong?

> > 
> > +static inline void axi_chan_disable(struct axi_dma_chan *chan)
> > +{
> > +	u32 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
> > +	val &= ~((1 << chan->id) << DMAC_CHAN_EN_SHIFT);
> > +	val |=  ((1 << chan->id) << DMAC_CHAN_EN_WE_SHIFT);
> You talked somewhere of a BIT macro, here it is one
> 
> val &= ~BIT(chan->id + DMAC_CHAN_EN_SHIFT);
> 
> or
> 
> val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
> 
> whatever suits better.
> 
> Check all code for this.
> 
> > 
> > +static inline bool axi_dma_is_sw_enable(struct axi_dma_chip *chip)
> > +{
> > +	struct dw_axi_dma *dw = chip->dw;
> > +	u32 i;
> > +
> > +	for (i = 0; i < dw->hdata->nr_channels; i++) {
> > +		if (dw->chan[i].in_use)
> Hmm... I know why we have such flag in dw_dmac, but I doubt it's
> needed
> in this driver. Just double check the need of it.
I use this flag to check the state of each channel (in use/unused) so I can
disable the DMAC when all channels are unused.

> > 
> > +			return true;
> > +	}
> > +
> > +	return false;
> > +}
> > 
> > 
> > +static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan,
> > dma_addr_t src,
> > +				   dma_addr_t dst, size_t len)
> > +{
> > 
> > +	u32 width;
> > +	dma_addr_t sdl = (src | dst | len);
> > +	u32 max_width = chan->chip->dw->hdata->m_data_width;
> Use reverse tree.
> 
> dma_addr_t use above is wrong. (size_t might be bigger in some cases)
> 
> > 
> > +
> > +	width = sdl ? __ffs(sdl) : DWAXIDMAC_TRANS_WIDTH_MAX;
> > +
> > +	return width <= max_width ? width : max_width;
> min() / min_t()
> 
> > 
> > +}
> > 
> > +static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t
> > adr)
> > +{
> > +	desc->lli.sar_lo = cpu_to_le32(adr);
> > +}
> Perhaps macros for all them? Choose whatever looks and suits better.
> 
> 
> > 
> > +/* TODO: REVISIT: how we should choose AXI master for mem-to-mem
> > transfer? */
> > +static void set_desc_dest_master(struct axi_dma_desc *desc)
> > +{
> > +	u32 val;
> > +
> > +	/* select AXI1 for source master if available*/
> Fix indentation, capitalize it.
> 
> > 
> > +}
> > 
> > +
> > +static int dw_probe(struct platform_device *pdev)
> > +{
> > +	struct axi_dma_chip *chip;
> > +	struct resource *mem;
> > +	struct dw_axi_dma *dw;
> > +	struct dw_axi_dma_hcfg *hdata;
> > +	u32 i;
> > +	int ret;
> > +
> > +	chip = devm_kzalloc(&pdev->dev, sizeof(*chip),
> > GFP_KERNEL);
> > +	if (!chip)
> > +		return -ENOMEM;
> > +
> > +	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
> > +	if (!dw)
> > +		return -ENOMEM;
> > +
> > +	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata),
> > GFP_KERNEL);
> > +	if (!hdata)
> > +		return -ENOMEM;
> > +
> > +	chip->dw = dw;
> > +	chip->dev = &pdev->dev;
> > +	chip->dw->hdata = hdata;
> > +
> > +	chip->irq = platform_get_irq(pdev, 0);
> > +	if (chip->irq < 0)
> > +		return chip->irq;
> > +
> > +	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> > +	chip->regs = devm_ioremap_resource(chip->dev, mem);
> > +	if (IS_ERR(chip->regs))
> > +		return PTR_ERR(chip->regs);
> > +
> > 
> > +	ret = dma_coerce_mask_and_coherent(chip->dev,
> > DMA_BIT_MASK(32));
> It will not work for 64 bits, it will not work for other users of
> this
> driver if any (when you have different DMA mask to be set).
Looks like I misunderstood the intended use of
dma_coerce_mask_and_coherent().

> 
> > 
> > +	if (ret)
> > +		return ret;
> > +
> > +	chip->clk = devm_clk_get(chip->dev, NULL);
> > +	if (IS_ERR(chip->clk))
> > +		return PTR_ERR(chip->clk);
> > +
> > +	ret = parse_device_properties(chip);
> > +	if (ret)
> > +		return ret;
> > +
> > +	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
> > +				sizeof(*dw->chan), GFP_KERNEL);
> > +	if (!dw->chan)
> > +		return -ENOMEM;
> > +
> > +	ret = devm_request_irq(chip->dev, chip->irq,
> > dw_axi_dma_intretupt,
> > +			       IRQF_SHARED, DRV_NAME, chip);
> > +	if (ret)
> > +		return ret;
> > +
> > 
> > 
> > +
> > +	INIT_LIST_HEAD(&dw->dma.channels);
> > +	for (i = 0; i < hdata->nr_channels; i++) {
> > +		struct axi_dma_chan *chan = &dw->chan[i];
> > +
> > +		chan->chip = chip;
> > +		chan->chan.device = &dw->dma;
> > +		dma_cookie_init(&chan->chan);
> > +		list_add_tail(&chan->chan.device_node, &dw-
> > > 
> > > dma.channels);
> > +
> > 
> > 
> > +		chan->id = (u8)i;
> This duplicates what you have in struct dma_chan
> 
> > 
> > +		chan->priority = (u8)i;
> > +		chan->direction = DMA_TRANS_NONE;
> > +
> > 
> > +
> > +	dw->dma.device_alloc_chan_resources =
> > dma_chan_alloc_chan_resources;
> > +	dw->dma.device_free_chan_resources =
> > dma_chan_free_chan_resources;
> > +
> > +	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
> > +	dw->dma.device_prep_dma_sg = dma_chan_prep_dma_sg;
> > +
> > +	ret = clk_prepare_enable(chip->clk);
> > +	if (ret < 0)
> > +		return ret;
> > +
> > 
> > +	ret = dma_async_device_register(&dw->dma);
> // offtopic
> Perhaps someone can eventually introduce devm_ variant of this?
> // offtopic
> 
> > 
> > +	if (ret)
> > +		goto err_clk_disable;
> > +
> > 
> > +MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller
> > platform
> > driver");
> > +MODULE_AUTHOR("Paltsev Eugeniy <Eugeniy.Paltsev@synopsys.com>");
> FirstName LastName <email@address.com> ?
> 
> 
-- 
 Paltsev Eugeniy


* Re: [RFC] dmaengine: Add DW AXI DMAC driver
  2017-01-20 18:21   ` Eugeniy Paltsev
@ 2017-01-20 20:48     ` Andy Shevchenko
  0 siblings, 0 replies; 4+ messages in thread
From: Andy Shevchenko @ 2017-01-20 20:48 UTC (permalink / raw)
  To: Eugeniy Paltsev
  Cc: dmaengine, andriy.shevchenko, dan.j.williams, linux-kernel,
	Alexey.Brodkin, vinod.koul, linux-snps-arc

On Fri, Jan 20, 2017 at 8:21 PM, Eugeniy Paltsev
<Eugeniy.Paltsev@synopsys.com> wrote:
> Hi Andy,
> thanks for respond.
>
> I'm agree with most of the comments.
> My comments below.

So my answers!

> On Fri, 2017-01-20 at 15:38 +0200, Andy Shevchenko wrote:
>> On Fri, 2017-01-20 at 13:50 +0300, Eugeniy Paltsev wrote:
>> >
>> > This patch adds support for the DW AXI DMAC controller.
>> >
>> > DW AXI DMAC is a part of upcoming development board from Synopsys.
>> >
>> > In this driver implementation only DMA_MEMCPY and DMA_SG transfers
>> > are supported.
>> >
>> > Note: there is no DT documentation in this patch yet, but it will
>> > be added in the nearest future.
>> First of all, please use virt-dma API.
> Is it necessary?
> I couldn't find any notes about virt-dma in documentation.

I can't speak for Vinod or others, but I'm pretty sure their opinion
would be similar.
The rationale is:
- your code will be roughly half the size it is now
- since it's a commonly used (sub)framework, it allows you to create less
error-prone drivers
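
To give an idea, a rough sketch of what this driver could look like on top
of the helpers in drivers/dma/virt-dma.h (field names beyond the ones you
already have are just illustrative):

struct axi_dma_chan {
	struct virt_dma_chan	vc;	/* embeds dma_chan, lock and desc lists */
	struct axi_dma_chip	*chip;
	void __iomem		*chan_regs;
	u8			id;
};

struct axi_dma_desc {
	struct virt_dma_desc	vd;	/* embeds dma_async_tx_descriptor */
	struct axi_dma_lli	lli;
	size_t			total_len;
};

The prep functions then simply end with

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

so your own tx_submit()/cookie handling goes away, issue_pending() checks
vchan_issue_pending(&chan->vc) and kicks the hardware if it is idle, and the
completion path is just vchan_cookie_complete(&desc->vd). Add
vchan_init(&chan->vc, &dw->dma) in probe and a vc.desc_free callback that
returns descriptors to the pool, and most of the list handling is gone.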


>
>> Second, don't look to dw_dmac for examples, it's not a good one to be
>> learned from.
> Any suggestions about DMA driver to look for examples?

...those are the reasons I would not recommend looking at dw_dmac.
Any recent driver would work. I see something with "*-axi-*"
in the name, which looks pretty good and simple.

>> > +   u32 val = axi_dma_ioread32(chip, DMAC_CFG);
>> > +   val &= ~DMAC_EN_MASK;
>> > +   axi_dma_iowrite32(chip, DMAC_CFG, val);
>> Better to use
>>
>> u32 val;
>>
>> val = read();
>> val &= y;
>> write(val);
>>
>> pattern.
>>
>> Same for similar places.
> Are you sure?
> I saw opposite advise to use construction like
> ------------->8---------------
> u32 val = read();
> val &= y;
> write(val);
> ------------->8---------------
> to
> reduce space.

While fewer lines of code is good, readability is preferred over the amount of LOC.

Like I said, it's a matter of style. Here is my opinion and the rationale
behind it.

>> > +   if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
>> I doubt likely() is useful here anyhow. Have you looked into
>> assembly?
>> Does it indeed do what it's supposed to?
> Yes, i looked into assembly.
> I used "likely()" because irq_mask will be equal DWAXIDMAC_IRQ_ALL in
> 99.9% of this function call.
> It is useful here, am I wrong?

I hear that there are only really rare cases where you need to use
[un]likely(). Can you show the difference in the assembly you get
between the two?

>> > +static inline bool axi_dma_is_sw_enable(struct axi_dma_chip *chip)
>> > +{
>> > +   struct dw_axi_dma *dw = chip->dw;
>> > +   u32 i;
>> > +
>> > +   for (i = 0; i < dw->hdata->nr_channels; i++) {
>> > +           if (dw->chan[i].in_use)
>> Hmm... I know why we have such flag in dw_dmac, but I doubt it's
>> needed
>> in this driver. Just double check the need of it.
> I use this flag to check state of channel (used now/unused) to disable
> dmac if all channels are unused for now.

Okay, but wouldn't it be easier to use runtime PM for that? You would not
need any special counter, and runtime PM would take care of
enabling/disabling the device.
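
Roughly (just a sketch, assuming the controller enable/disable moves into
the runtime PM callbacks):

	/* probe */
	pm_runtime_enable(chip->dev);

	/* alloc_chan_resources */
	pm_runtime_get_sync(chan->chip->dev);

	/* free_chan_resources */
	pm_runtime_put(chan->chip->dev);

with axi_dma_enable()/axi_dma_disable() (and the clock, if you want) done
from the runtime_resume/runtime_suspend callbacks, so the last put disables
the hardware for you and the in_use flag is not needed anymore.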

At LPC2016 Vinod brought up the topic of a generic runtime PM implementation
for the DMAEngine subsystem. I don't know where we are on that right now.
Perhaps he could tell us more.

>> > +
>> > +   width = sdl ? __ffs(sdl) : DWAXIDMAC_TRANS_WIDTH_MAX;

A comment I missed earlier: I'm not sure that you will ever have sdl == 0.

>> > +   ret = dma_coerce_mask_and_coherent(chip->dev,
>> > DMA_BIT_MASK(32));
>> It will not work for 64 bits, it will not work for other users of
>> this
>> driver if any (when you have different DMA mask to be set).
> Looks like I misunderstood dma_coerce_mask_and_coherent purposes of
> using.

Yeah, Russell did it, I think, as a kind of temporary solution for some
cases. Basically the DMA mask is set by generic platform code for
Device Tree cases. I unfortunately don't know / have forgotten the details.
Please invest a bit more time to make it clear. If you go this way,
add a comment explaining why.

I would suggest waiting for Vinod and perhaps others to comment
before sending a new version (it's apparently material for v4.12+).

-- 
With Best Regards,
Andy Shevchenko

