All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-10  8:51 ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10  8:51 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs based on virt_dma.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Applies to next-20130703
    
    The MMC driver I plan to submit next can use this
    (falls back to PIO if unavailable).
    
    Nothing else uses DMA on UC-7112-LX.

 drivers/dma/Kconfig      |   9 +
 drivers/dma/Makefile     |   1 +
 drivers/dma/moxart-dma.c | 473 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
 4 files changed, 671 insertions(+)
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	default n
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3f1e771
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,473 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+/* Per-channel driver state wrapping a virt-dma channel. */
+struct moxart_dma_chan {
+	struct virt_dma_chan		vchan;
+	int				ch_num;		/* hardware channel index, 0..APB_DMA_MAX_CHANNEL-1 */
+	bool				allocated;	/* claimed via alloc_chan_resources */
+	int				error_flag;	/* set on APB_DMA_ERR_INT_STS in the IRQ handler */
+	struct moxart_dma_reg		*reg;		/* this channel's register window */
+	void				(*callback)(void *param);	/* completion callback copied from tx_desc at submit */
+	void				*callback_param;
+	struct completion		dma_complete;	/* NOTE(review): never initialized or waited on in this file -- confirm needed */
+	struct dma_slave_config		cfg;		/* last config accepted by DMA_SLAVE_CONFIG */
+	struct dma_async_tx_descriptor	tx_desc;	/* single embedded descriptor; no queueing */
+};
+
+/* Controller-wide state: one dma_device exposing APB_DMA_MAX_CHANNEL channels. */
+struct moxart_dma_container {
+	int			ctlr;		/* pdev->id of the platform device */
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/* NOTE(review): file-scope singleton (set in probe) -- limits the driver to one controller instance. */
+struct moxart_dma_container *mdc;
+
+/* Resolve the struct device used for diagnostics on this channel. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Upcast from the embedded dma_device back to the driver container. */
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Upcast from the embedded virt-dma channel back to the driver channel. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, vchan.chan);
+}
+
+/*
+ * Stop the channel: clear the enable bit and mask both the "finished"
+ * and "error" interrupt enables in the channel config register.
+ * The register read-modify-write is serialized by dma_lock against the
+ * other paths that touch cfg.ul.  Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch the two supported
+ * commands (terminate, slave config); anything else is -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	if (cmd == DMA_TERMINATE_ALL) {
+		moxart_terminate_all(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+
+	return -ENOSYS;
+}
+
+/*
+ * tx_submit hook: assign a cookie and unmask the finished/error
+ * interrupts for this channel.  The transfer itself is started later,
+ * by moxart_issue_pending() setting APB_DMA_ENABLE.
+ *
+ * NOTE(review): callback/callback_param/error_flag are written before
+ * dma_lock is taken -- confirm no concurrent submitter can race here.
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Prepare a single-buffer slave transfer.
+ *
+ * Only the first scatterlist entry is programmed into the hardware
+ * (the controller has no descriptor chaining); sg_len is otherwise
+ * ignored, so callers must supply a single contiguous segment.
+ *
+ * Returns the channel's single embedded descriptor.
+ *
+ * Fixes vs. original: the dev_dbg printed mcfg.ul without ever
+ * initializing mcfg (reading an uninitialized automatic variable is
+ * undefined behavior) -- the variable and the debug print are gone;
+ * and sg_dma_address() already yields a bus address suitable for the
+ * controller, so the extra virt_to_phys() translation was wrong per
+ * the DMA-API and has been dropped.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(sg_dma_address(&sgl[0]), &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(sg_dma_address(&sgl[0]), &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+/*
+ * dma_request_channel() filter: accept only channels owned by this
+ * driver whose hardware channel number equals *(unsigned int *)param.
+ */
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan;
+	unsigned int ch_req;
+
+	if (chan->device->dev->driver != &moxart_driver.driver) {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+
+	mchan = to_moxart_dma_chan(chan);
+	ch_req = *(unsigned int *)param;
+	dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+		__func__, mchan, ch_req, mchan->ch_num);
+	return ch_req == mchan->ch_num;
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+/*
+ * Claim the channel for a client.  Each hardware channel can be
+ * allocated only once; returns -ENODEV if it is already in use or the
+ * channel number is out of range, 0 on success.
+ *
+ * Fix vs. original: the loop over all APB_DMA_MAX_CHANNEL indices only
+ * ever matched i == mchan->ch_num, so it reduces to a direct check.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (mchan->ch_num < 0 || mchan->ch_num >= APB_DMA_MAX_CHANNEL ||
+	    mchan->allocated)
+		return -ENODEV;
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/* Release the channel; pairs with moxart_alloc_chan_resources(). */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	/* ch_num is a signed int, so print it with %d (was %u). */
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%d\n",
+		__func__, mchan->ch_num);
+}
+
+/*
+ * Kick the channel: set APB_DMA_ENABLE so the transfer programmed by
+ * prep_slave_sg()/tx_submit() starts.  Read-modify-write of cfg.ul is
+ * serialized by dma_lock.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Report cookie completion state.  The driver keeps no residue
+ * information, so this is a plain dma_cookie_status() lookup.
+ *
+ * Fix vs. original: the "if (ret == DMA_SUCCESS || !txstate) return
+ * ret;" branch returned exactly what the fall-through returned -- dead
+ * code, removed.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/* Populate the dmaengine callbacks advertised by this controller. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared interrupt handler for all channels.  For every allocated
+ * channel: clear the latched finished/error status bits (written back
+ * at the end), complete the cookie on finish, and invoke the client
+ * callback -- in hard-IRQ context -- if one was set at submit time.
+ *
+ * NOTE(review): error_flag is set on APB_DMA_ERR_INT_STS but then
+ * unconditionally cleared a few lines below, before anything can read
+ * it -- looks unintentional; confirm.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt descriptor installed with setup_irq() in moxart_probe().
+ * NOTE(review): IRQF_DISABLED is deprecated (a no-op on modern
+ * kernels); (devm_)request_irq() would be the preferred interface.
+ */
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+/*
+ * Probe: map the controller registers from the DT node, initialize the
+ * dma_device and its channels, register with the dmaengine core, then
+ * install the shared interrupt handler.
+ *
+ * Fixes vs. original: dma_base_addr is no longer 'static' (a function
+ * local has no reason to persist); a failed irq_of_parse_and_map()
+ * (returns 0) is now detected; and the dma_async_device_register()
+ * return value is checked before the IRQ handler is installed.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "can't map DMA interrupt\n");
+		return -EINVAL;
+	}
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register windows start at offset 0x80 from the base. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = false;
+		mchan->callback_param = NULL;
+		vchan_init(&mchan->vchan, &mdc->dma_slave);
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return 0;
+}
+
+/*
+ * Unregister from the dmaengine core.  The container and register
+ * mapping are devm-managed and released automatically.
+ * NOTE(review): the interrupt installed with setup_irq() in probe is
+ * never removed here -- confirm intended lifetime.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+/* OF match: binds against "moxa,moxart-dma" device-tree nodes. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+/* Platform glue; moxart_filter_fn() also compares against .driver. */
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time so the DMA engine is available
+ * before client drivers (e.g. MMC) probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+/*
+ * Channel configuration register, accessible either as the raw 32-bit
+ * value (.ul, manipulated with the APB_DMA_* shifted masks) or via
+ * bitfields (.bits, with the unshifted APB_DMAB_* values).
+ *
+ * NOTE(review): C bitfield allocation order is implementation-defined;
+ * this layout assumes LSB-first allocation matching the APB_DMA_*
+ * masks -- confirm for any non-little-endian build.
+ */
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	/* raw 32-bit register value */
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+/* Per-channel register window; probe maps it at base + 0x80 + i * sizeof. */
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* transfer count; bytes moved per cycle depend on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-10  8:51 ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10  8:51 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs based on virt_dma.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Applies to next-20130703
    
    The MMC driver I plan to submit next can use this
    (falls back to PIO if unavailable).
    
    Nothing else uses DMA on UC-7112-LX.

 drivers/dma/Kconfig      |   9 +
 drivers/dma/Makefile     |   1 +
 drivers/dma/moxart-dma.c | 473 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
 4 files changed, 671 insertions(+)
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	default n
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3f1e771
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,473 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct virt_dma_chan		vchan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, vchan.chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. once cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+		vchan_init(&mchan->vchan, &mdc->dma_slave);
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depend on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH] dmaengine: Add MOXA ART DMA engine driver
  2013-07-10  8:51 ` Jonas Jensen
@ 2013-07-10  9:30   ` Russell King - ARM Linux
  -1 siblings, 0 replies; 80+ messages in thread
From: Russell King - ARM Linux @ 2013-07-10  9:30 UTC (permalink / raw)
  To: Jonas Jensen; +Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd

On Wed, Jul 10, 2013 at 10:51:03AM +0200, Jonas Jensen wrote:
> +#include "virt-dma.h"
...
> +struct moxart_dma_chan {
> +	struct virt_dma_chan		vchan;
...
> +static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_dma_chan, vchan.chan);
> +}
...
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		mchan->ch_num = i;
> +		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> +			     + i * sizeof(struct moxart_dma_reg));
> +		mchan->callback = NULL;
> +		mchan->allocated = 0;
> +		mchan->callback_param = NULL;
> +		vchan_init(&mchan->vchan, &mdc->dma_slave);

Do you actually make any use what so ever of the vchan support?

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-10  9:30   ` Russell King - ARM Linux
  0 siblings, 0 replies; 80+ messages in thread
From: Russell King - ARM Linux @ 2013-07-10  9:30 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, Jul 10, 2013 at 10:51:03AM +0200, Jonas Jensen wrote:
> +#include "virt-dma.h"
...
> +struct moxart_dma_chan {
> +	struct virt_dma_chan		vchan;
...
> +static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_dma_chan, vchan.chan);
> +}
...
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		mchan->ch_num = i;
> +		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> +			     + i * sizeof(struct moxart_dma_reg));
> +		mchan->callback = NULL;
> +		mchan->allocated = 0;
> +		mchan->callback_param = NULL;
> +		vchan_init(&mchan->vchan, &mdc->dma_slave);

Do you actually make any use what so ever of the vchan support?

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH] dmaengine: Add MOXA ART DMA engine driver
  2013-07-10  9:30   ` Russell King - ARM Linux
@ 2013-07-10  9:48     ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10  9:48 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd

On 10 July 2013 11:30, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> Do you actually make any use what so ever of the vchan support?

Only because it was inspired by the edma driver:

static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct moxart_dma_chan, vchan.chan);
}

It could use struct dma_chan instead I think.


Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-10  9:48     ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10  9:48 UTC (permalink / raw)
  To: linux-arm-kernel

On 10 July 2013 11:30, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> Do you actually make any use what so ever of the vchan support?

Only because it was inspired by the edma driver:

static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct moxart_dma_chan, vchan.chan);
}

It could use struct dma_chan instead I think.


Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v2] dmaengine: Add MOXA ART DMA engine driver
  2013-07-10  8:51 ` Jonas Jensen
@ 2013-07-10 12:43   ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10 12:43 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Applies to next-20130703
    
    Changes since v1:
    
    1. remove use of vchan support

 drivers/dma/Kconfig      |   9 +
 drivers/dma/Makefile     |   1 +
 drivers/dma/moxart-dma.c | 477 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
 4 files changed, 675 insertions(+)
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	default n
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depend on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v2] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-10 12:43   ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-10 12:43 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Applies to next-20130703
    
    Changes since v1:
    
    1. remove use of vchan support

 drivers/dma/Kconfig      |   9 +
 drivers/dma/Makefile     |   1 +
 drivers/dma/moxart-dma.c | 477 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/dma/moxart-dma.h | 188 +++++++++++++++++++
 4 files changed, 675 insertions(+)
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..d4ba42b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,15 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	default n
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		*/
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depends on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v3] dmaengine: Add MOXA ART DMA engine driver
  2013-07-10 12:43   ` Jonas Jensen
@ 2013-07-17 10:06     ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-17 10:06 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v2:
    
    1. add devicetree bindings document
    2. remove DMA_VIRTUAL_CHANNELS and "default n" from Kconfig
    
    Applies to next-20130716

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 477 +++++++++++++++++++++
 drivers/dma/moxart-dma.h                           | 188 ++++++++
 5 files changed, 692 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..61a019d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		*/
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depends on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v3] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-17 10:06     ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-17 10:06 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v2:
    
    1. add devicetree bindings document
    2. remove DMA_VIRTUAL_CHANNELS and "default n" from Kconfig
    
    Applies to next-20130716

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 477 +++++++++++++++++++++
 drivers/dma/moxart-dma.h                           | 188 ++++++++
 5 files changed, 692 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..61a019d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Should be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..16fddf4
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,477 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..358c006
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				(1<<0)
+#define APB_DMA_FIN_INT_STS			(1<<1)
+#define APB_DMA_FIN_INT_EN			(1<<2)
+#define APB_DMA_BURST_MODE			(1<<3)
+#define APB_DMA_ERR_INT_STS			(1<<4)
+#define APB_DMA_ERR_INT_EN			(1<<5)
+#define APB_DMA_SOURCE_AHB			(1<<6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			(1<<7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			(1<<8)
+#define APB_DMA_SOURCE_INC_2_8			(2<<8)
+#define APB_DMA_SOURCE_INC_4_16			(3<<8)
+#define APB_DMA_SOURCE_DEC_1_4			(5<<8)
+#define APB_DMA_SOURCE_DEC_2_8			(6<<8)
+#define APB_DMA_SOURCE_DEC_4_16			(7<<8)
+#define APB_DMA_SOURCE_INC_MASK			(7<<8)
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			(1<<12)
+#define APB_DMA_DEST_INC_2_8			(2<<12)
+#define APB_DMA_DEST_INC_4_16			(3<<12)
+#define APB_DMA_DEST_DEC_1_4			(5<<12)
+#define APB_DMA_DEST_DEC_2_8			(6<<12)
+#define APB_DMA_DEST_DEC_4_16			(7<<12)
+#define APB_DMA_DEST_INC_MASK			(7<<12)
+#define APB_DMA_DEST_REQ_NO_MASK		(15<<16)
+#define APB_DMA_DATA_WIDTH_MASK			(3<<20)
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			(1<<20)
+#define APB_DMA_DATA_WIDTH_1			(2<<20)
+#define APB_DMA_SOURCE_REQ_NO_MASK		(15<<24)
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		*/
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depends on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
  2013-07-17 10:06     ` Jonas Jensen
@ 2013-07-29 13:44       ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-29 13:44 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v3:
    
    1. use BIT() macro in header file
    2. use hardcoded masks in header file
    3. include linux/bitops.h
    
    device tree bindings document:
    4. describe compatible variable "Must be" instead of "Should be"
    
    Applies to next-20130729

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 478 +++++++++++++++++++++
 drivers/dma/moxart-dma.h                           | 188 ++++++++
 5 files changed, 693 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..f18f0fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..4f80a90
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,478 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * Disable the channel and mask its completion/error interrupts.
+ *
+ * Read-modify-write of the per-channel config register under the
+ * driver-global dma_lock.  No in-flight descriptor is completed or
+ * recycled here.  Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	/* Clear enable plus both interrupt-enable bits in one write. */
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Cache @cfg in the channel and program the config register: burst
+ * mode is always set, the data width and the memory-side address
+ * increment are derived from the bus width, and the hardware request
+ * line is taken from cfg->slave_id.
+ *
+ * NOTE(review): the width switch below always reads src_addr_width,
+ * even for DMA_DEV_TO_MEM where dst_addr_width would seem the right
+ * field -- confirm intended.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	/*
+	 * MEM_TO_DEV: fixed APB destination, incrementing AHB source;
+	 * DEV_TO_MEM: the mirror image.  The device side never
+	 * increments (APB_DMAB_*_INC_0).
+	 */
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		/*
+		 * NOTE(review): the DEST_/SOURCE_ constant names are crossed
+		 * in this branch (dest_sel gets APB_DMAB_SOURCE_AHB, etc.);
+		 * the numeric values coincide so behaviour is unaffected,
+		 * but the matching constants should be used.
+		 */
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch DMA_TERMINATE_ALL
+ * and DMA_SLAVE_CONFIG to their helpers; any other command returns
+ * -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		/* For DMA_SLAVE_CONFIG, @arg carries a dma_slave_config *. */
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * tx_submit hook of the (single, channel-embedded) descriptor: stash
+ * the completion callback, assign a cookie, and unmask the finish and
+ * error interrupts.  The transfer itself is only started later, when
+ * moxart_issue_pending() sets APB_DMA_ENABLE.
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* Arm both interrupt sources; enable bit stays clear for now. */
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program the channel's source/destination/cycle registers for one
+ * slave transfer and hand back the channel's single embedded
+ * descriptor (so at most one transfer can be outstanding per channel).
+ *
+ * NOTE(review): only sgl[0] is programmed; @sg_len is ignored, so
+ * multi-entry scatterlists are silently truncated.
+ * NOTE(review): sg_dma_address() already yields a DMA/bus address;
+ * passing it through virt_to_phys() looks wrong -- confirm.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	/*
+	 * NOTE(review): adr_width is a dma_slave_buswidth byte count
+	 * (1/2/4) used directly as a shift amount -- verify this matches
+	 * the hardware's bytes-per-cycle for every width/burst combination.
+	 */
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* NOTE(review): mcfg is never assigned before this read -- the
+	 * debug print below uses an uninitialized value. */
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+/*
+ * Filter function for dma_request_channel(): @param points to the
+ * requested channel number.  Returns true only when @chan belongs to
+ * this driver and its channel number matches.
+ */
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+
+/*
+ * Claim the channel for a client.  Returns 0 on success, -ENODEV if
+ * the channel is already allocated.
+ *
+ * NOTE(review): the loop only ever matches i == mchan->ch_num, so it
+ * reduces to a plain "if (!mchan->allocated)" check.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+/*
+ * Release the channel: simply clear the allocated flag so a later
+ * moxart_alloc_chan_resources() can claim it again.  No hardware
+ * state is touched.
+ */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+/*
+ * Kick off the transfer previously programmed by prep_slave_sg/
+ * tx_submit by setting APB_DMA_ENABLE in the channel config register.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Report descriptor status straight from the cookie bookkeeping; no
+ * hardware state or residue is consulted.
+ *
+ * NOTE(review): the "if" is redundant -- both paths return the same
+ * ret, so this is just "return dma_cookie_status(...)".
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * Populate the dma_device ops table with this driver's callbacks and
+ * initialize its (still empty) channel list; the channels themselves
+ * are added by moxart_probe().
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared interrupt handler: scan every allocated channel, complete the
+ * cookie on a "finished" status, latch error_flag on an "error"
+ * status, run the client callback, then write the config register back
+ * with the status bits cleared to acknowledge.
+ *
+ * NOTE(review): the client callback runs directly in hard-IRQ context;
+ * dmaengine drivers normally defer callbacks to a tasklet.
+ * NOTE(review): error_flag is unconditionally zeroed below before any
+ * reader can observe it, so the error indication is effectively lost.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+/*
+ * Platform probe: allocate the controller container, map the register
+ * block from DT, initialize the four channels (register window starts
+ * at base + 0x80, one moxart_dma_reg per channel), register the
+ * dma_device and install the shared interrupt handler.
+ *
+ * NOTE(review): irq_of_parse_and_map() returns 0 on failure and is not
+ * checked; setup_irq()'s return is ignored; and the IRQ is installed
+ * even when dma_async_device_register() failed.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	/* NOTE(review): "static" makes this persist across probes -- looks
+	 * unintentional for a per-device mapping; confirm. */
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Per-channel setup: number, register window, clean callback state. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+/*
+ * Platform remove: unregister the dma_device.  Mapped registers and
+ * the container are devm-managed and freed automatically.
+ *
+ * NOTE(review): the irqaction installed by probe via setup_irq() is
+ * never removed here.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..a37b13f
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+#define APB_DMA_SOURCE_AHB			BIT(6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			BIT(7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_SOURCE_INC_MASK			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+#define APB_DMA_DEST_INC_MASK			0x7000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		*/
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depend on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-29 13:44       ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-07-29 13:44 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v3:
    
    1. use BIT() macro in header file
    2. use hardcoded masks in header file
    3. include linux/bitops.h
    
    device tree bindings document:
    4. describe compatible variable "Must be" instead of "Should be"
    
    Applies to next-20130729

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 478 +++++++++++++++++++++
 drivers/dma/moxart-dma.h                           | 188 ++++++++
 5 files changed, 693 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c
 create mode 100644 drivers/dma/moxart-dma.h

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..f18f0fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt number
+- #dma-cells : see dma.txt, should be 1
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..4f80a90
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,478 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "moxart-dma.h"
+
+static DEFINE_SPINLOCK(dma_lock);
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_dma_container *mdc;
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_moxart_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Cache @cfg in the channel and program the config register: burst
+ * mode is always set, the data width and the memory-side address
+ * increment are derived from the bus width, and the hardware request
+ * line is taken from cfg->slave_id.
+ *
+ * NOTE(review): the width switch below always reads src_addr_width,
+ * even for DMA_DEV_TO_MEM where dst_addr_width would seem the right
+ * field -- confirm intended.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+	unsigned int data_width, data_inc;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.bits.burst = APB_DMAB_BURST_MODE;
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		data_width = APB_DMAB_DATA_WIDTH_1;
+		data_inc = APB_DMAB_DEST_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		data_width = APB_DMAB_DATA_WIDTH_2;
+		data_inc = APB_DMAB_DEST_INC_2_8;
+		break;
+	default:
+		data_width = APB_DMAB_DATA_WIDTH_4;
+		data_inc = APB_DMAB_DEST_INC_4_16;
+		break;
+	}
+
+	/*
+	 * MEM_TO_DEV: fixed APB destination, incrementing AHB source;
+	 * DEV_TO_MEM: the mirror image.  The device side never
+	 * increments (APB_DMAB_*_INC_0).
+	 */
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		mcfg.bits.data_width = data_width;
+		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
+		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.source_inc = data_inc;
+
+		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
+		mcfg.bits.source_req_no = 0;
+	} else {
+		mcfg.bits.data_width = data_width;
+		/*
+		 * NOTE(review): the DEST_/SOURCE_ constant names are crossed
+		 * in this branch (dest_sel gets APB_DMAB_SOURCE_AHB, etc.);
+		 * the numeric values coincide so behaviour is unaffected,
+		 * but the matching constants should be used.
+		 */
+		mcfg.bits.dest_sel = APB_DMAB_SOURCE_AHB;
+		mcfg.bits.dest_inc = data_inc;
+		mcfg.bits.source_sel = APB_DMAB_DEST_APB;
+		mcfg.bits.source_inc = APB_DMAB_DEST_INC_0;
+
+		mcfg.bits.dest_req_no = 0;
+		mcfg.bits.source_req_no = mchan->cfg.slave_id;
+	}
+
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program the channel's source/destination/cycle registers for one
+ * slave transfer and hand back the channel's single embedded
+ * descriptor (so at most one transfer can be outstanding per channel).
+ *
+ * NOTE(review): only sgl[0] is programmed; @sg_len is ignored, so
+ * multi-entry scatterlists are silently truncated.
+ * NOTE(review): sg_dma_address() already yields a DMA/bus address;
+ * passing it through virt_to_phys() looks wrong -- confirm.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	union moxart_dma_reg_cfg mcfg;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	/*
+	 * NOTE(review): adr_width is a dma_slave_buswidth byte count
+	 * (1/2/4) used directly as a shift amount -- verify this matches
+	 * the hardware's bytes-per-cycle for every width/burst combination.
+	 */
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* NOTE(review): mcfg is never assigned before this read -- the
+	 * debug print below uses an uninitialized value. */
+	dev_dbg(chan2dev(chan), "%s: mcfg.ul=%x read from &mchan->reg->cfg.ul=%x\n",
+		__func__, mcfg.ul, (unsigned int)&mchan->reg->cfg.ul);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+static struct platform_driver moxart_driver;
+
+bool moxart_filter_fn(struct dma_chan *chan, void *param)
+{
+	if (chan->device->dev->driver == &moxart_driver.driver) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+EXPORT_SYMBOL(moxart_filter_fn);
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	union moxart_dma_reg_cfg mcfg;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&dma_lock, flags);
+
+	mcfg.ul = readl(&mchan->reg->cfg.ul);
+	mcfg.ul |= APB_DMA_ENABLE;
+	writel(mcfg.ul, &mchan->reg->cfg.ul);
+
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Report descriptor status straight from the cookie bookkeeping; no
+ * hardware state or residue is consulted.
+ *
+ * NOTE(review): the "if" is redundant -- both paths return the same
+ * ret, so this is just "return dma_cookie_status(...)".
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared interrupt handler: scan every allocated channel, complete the
+ * cookie on a "finished" status, latch error_flag on an "error"
+ * status, run the client callback, then write the config register back
+ * with the status bits cleared to acknowledge.
+ *
+ * NOTE(review): the client callback runs directly in hard-IRQ context;
+ * dmaengine drivers normally defer callbacks to a tasklet.
+ * NOTE(review): error_flag is unconditionally zeroed below before any
+ * reader can observe it, so the error indication is effectively lost.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct device *dev = devid;
+	struct moxart_dma_chan *mchan = &mdc->slave_chans[0];
+	unsigned int i;
+	union moxart_dma_reg_cfg mcfg;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			mcfg.ul = readl(&mchan->reg->cfg.ul);
+			if (mcfg.ul & APB_DMA_FIN_INT_STS) {
+				mcfg.ul &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (mcfg.ul & APB_DMA_ERR_INT_STS) {
+				mcfg.ul &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				dev_dbg(dev, "%s: call callback for mchan=%p\n",
+					__func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(mcfg.ul, &mchan->reg->cfg.ul);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+/*
+ * Platform probe: allocate the controller container, map the register
+ * block from DT, initialize the four channels (register window starts
+ * at base + 0x80, one moxart_dma_reg per channel), register the
+ * dma_device and install the shared interrupt handler.
+ *
+ * NOTE(review): irq_of_parse_and_map() returns 0 on failure and is not
+ * checked; setup_irq()'s return is ignored; and the IRQ is installed
+ * even when dma_async_device_register() failed.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res_dma;
+	/* NOTE(review): "static" makes this persist across probes -- looks
+	 * unintentional for a per-device mapping; confirm. */
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(node, 0, &res_dma);
+	if (ret) {
+		dev_err(dev, "can't get DMA base resource\n");
+		return ret;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	dma_base_addr = devm_ioremap_resource(dev, &res_dma);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Per-channel setup: number, register window, clean callback state. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	moxart_dma_irq.dev_id = dev;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
new file mode 100644
index 0000000..a37b13f
--- /dev/null
+++ b/drivers/dma/moxart-dma.h
@@ -0,0 +1,188 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DMA_MOXART_H
+#define __DMA_MOXART_H
+
+#define APB_DMA_MAX_CHANNEL			4
+
+union moxart_dma_reg_cfg {
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+#define APB_DMA_SOURCE_AHB			BIT(6)
+#define APB_DMA_SOURCE_APB			0
+#define APB_DMA_DEST_AHB			BIT(7)
+#define APB_DMA_DEST_APB			0
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_SOURCE_INC_MASK			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+#define APB_DMA_DEST_INC_MASK			0x7000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+	unsigned int ul;
+
+	struct {
+
+#define APB_DMAB_ENABLE				1
+		/* enable DMA */
+		unsigned int enable:1;
+
+#define APB_DMAB_FIN_INT_STS			1
+		/* finished interrupt status */
+		unsigned int fin_int_sts:1;
+
+#define APB_DMAB_FIN_INT_EN			1
+		/* finished interrupt enable */
+		unsigned int fin_int_en:1;
+
+#define APB_DMAB_BURST_MODE			1
+		/* burst mode */
+		unsigned int burst:1;
+
+#define APB_DMAB_ERR_INT_STS			1
+		/* error interrupt status */
+		unsigned int err_int_sts:1;
+
+#define APB_DMAB_ERR_INT_EN			1
+		/* error interrupt enable */
+		unsigned int err_int_en:1;
+
+#define APB_DMAB_SOURCE_AHB			1
+#define APB_DMAB_SOURCE_APB			0
+		/* 0:APB (device), 1:AHB (RAM) */
+		unsigned int source_sel:1;
+
+#define APB_DMAB_DEST_AHB			1
+#define APB_DMAB_DEST_APB			0
+		/* 0:APB, 1:AHB */
+		unsigned int dest_sel:1;
+
+#define APB_DMAB_SOURCE_INC_0			0
+#define APB_DMAB_SOURCE_INC_1_4			1
+#define APB_DMAB_SOURCE_INC_2_8			2
+#define APB_DMAB_SOURCE_INC_4_16		3
+#define APB_DMAB_SOURCE_DEC_1_4			5
+#define APB_DMAB_SOURCE_DEC_2_8			6
+#define APB_DMAB_SOURCE_DEC_4_16		7
+#define APB_DMAB_SOURCE_INC_MASK		7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int source_inc:3;
+
+		unsigned int reserved1:1;
+
+#define APB_DMAB_DEST_INC_0			0
+#define APB_DMAB_DEST_INC_1_4			1
+#define APB_DMAB_DEST_INC_2_8			2
+#define APB_DMAB_DEST_INC_4_16			3
+#define APB_DMAB_DEST_DEC_1_4			5
+#define APB_DMAB_DEST_DEC_2_8			6
+#define APB_DMAB_DEST_DEC_4_16			7
+#define APB_DMAB_DEST_INC_MASK			7
+		/*
+		 * 000: no increment
+		 * 001: +1 (burst=0), +4  (burst=1)
+		 * 010: +2 (burst=0), +8  (burst=1)
+		 * 011: +4 (burst=0), +16 (burst=1)
+		 * 101: -1 (burst=0), -4  (burst=1)
+		 * 110: -2 (burst=0), -8  (burst=1)
+		 * 111: -4 (burst=0), -16 (burst=1)
+		 */
+		unsigned int dest_inc:3;
+
+		unsigned int reserved2:1;
+
+#define APB_DMAB_DEST_REQ_NO_MASK		15
+		/*
+		 * request signal select of destination
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int dest_req_no:4;
+
+#define APB_DMAB_DATA_WIDTH_MASK		3
+#define APB_DMAB_DATA_WIDTH_4			0
+#define APB_DMAB_DATA_WIDTH_2			1
+#define APB_DMAB_DATA_WIDTH_1			2
+		/*
+		 * data width of transfer
+		 * 00: word
+		 * 01: half
+		 * 10: byte
+		 */
+		unsigned int data_width:2;
+
+		unsigned int reserved3:2;
+
+#define APB_DMAB_SOURCE_REQ_NO_MASK		15
+		/*
+		 * request signal select of source
+		 * address for DMA hardware handshake
+		 *
+		 * the request line number is a property of
+		 * the DMA controller itself, e.g. MMC must
+		 * always request channels where
+		 * dma_slave_config->slave_id == 5
+		 *
+		 * 0:	 no request / grant signal
+		 * 1-15: request / grant signal
+		 */
+		unsigned int source_req_no:4;
+
+		unsigned int reserved4:4;
+	} bits;
+};
+
+struct moxart_dma_reg {
+	unsigned int source_addr;
+	unsigned int dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	unsigned int cycles;	/* depend on burst mode */
+	union moxart_dma_reg_cfg cfg;
+};
+
+#endif
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
  2013-07-29 13:44       ` Jonas Jensen
@ 2013-07-29 16:35         ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-07-29 16:35 UTC (permalink / raw)
  To: Jonas Jensen; +Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, linux

On Monday 29 July 2013, Jonas Jensen wrote:

> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..f18f0fb
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : see dma.txt, should be 1
> +
> +Example:
> +
> +	dma: dma@90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500000 0x1000>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};

The binding should really define what the one cell in the dma specifier refers
to. For all I can tell, it is a hardcoded channel number, and each channel
corresponds to exactly one slave request line.

> +static DEFINE_SPINLOCK(dma_lock);

Can't this be part of the device structure? You should not need a global lock here.

> +struct moxart_dma_container {
> +	int			ctlr;
> +	struct dma_device	dma_slave;
> +	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_dma_container *mdc;

Same here. Also, you should never have global identifiers with just three characters.
Most of your 'static' variables are already prefixed "moxart_".

> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	union moxart_dma_reg_cfg mcfg;
> +	unsigned long flags;
> +	unsigned int data_width, data_inc;
> +
> +	spin_lock_irqsave(&dma_lock, flags);
> +
> +	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
> +
> +	mcfg.ul = readl(&mchan->reg->cfg.ul);
> +	mcfg.bits.burst = APB_DMAB_BURST_MODE;
> +
> +	switch (mchan->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		data_width = APB_DMAB_DATA_WIDTH_1;
> +		data_inc = APB_DMAB_DEST_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		data_width = APB_DMAB_DATA_WIDTH_2;
> +		data_inc = APB_DMAB_DEST_INC_2_8;
> +		break;
> +	default:
> +		data_width = APB_DMAB_DATA_WIDTH_4;
> +		data_inc = APB_DMAB_DEST_INC_4_16;
> +		break;
> +	}
> +
> +	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
> +		mcfg.bits.data_width = data_width;
> +		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
> +		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
> +		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
> +		mcfg.bits.source_inc = data_inc;
> +
> +		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
> +		mcfg.bits.source_req_no = 0;

You must not override the "dest_req_no" and "source_req_no" in moxart_slave_config
since they are already set by the ->xlate() function and the driver calling
slave_config generally has no knowledge of what the slave id is.

> +static struct platform_driver moxart_driver;

Please reorder the symbols so you don't need the forward declaration.

> +bool moxart_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	if (chan->device->dev->driver == &moxart_driver.driver) {

No need to check the driver. What you want to check instead is that
the *device* matches.

> +		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +		unsigned int ch_req = *(unsigned int *)param;
> +		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> +			__func__, mchan, ch_req, mchan->ch_num);
> +		return ch_req == mchan->ch_num;
> +	} else {
> +		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> +			__func__);
> +		return false;
> +	}
> +}
> +EXPORT_SYMBOL(moxart_filter_fn);

Don't export the filter function. No slave driver should rely on this, since you
have DT probing.


> diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
> new file mode 100644
> index 0000000..a37b13f
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.h

You don't need a separate file here, just move the contents into moxart-dma.c

> +union moxart_dma_reg_cfg {
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +#define APB_DMA_SOURCE_AHB			BIT(6)
> +#define APB_DMA_SOURCE_APB			0
> +#define APB_DMA_DEST_AHB			BIT(7)
> +#define APB_DMA_DEST_APB			0
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +#define APB_DMA_SOURCE_INC_MASK			0x700
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +#define APB_DMA_DEST_INC_MASK			0x7000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +	unsigned int ul;
> +
> +	struct {
> +
> +#define APB_DMAB_ENABLE				1
> +		/* enable DMA */
> +		unsigned int enable:1;
> +
> +#define APB_DMAB_FIN_INT_STS			1
> +		/* finished interrupt status */
> +		unsigned int fin_int_sts:1;

The bit numbers don't actually match here if you build the kernel as
big-endian. You cannot use bitfields for hw data structures.

While you are here, get rid of the silly 'BIT' macro use as well.
Using hexadecimal literals is much clearer and you do that for
some fields anyway.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
@ 2013-07-29 16:35         ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-07-29 16:35 UTC (permalink / raw)
  To: linux-arm-kernel

On Monday 29 July 2013, Jonas Jensen wrote:

> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..f18f0fb
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg : Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : see dma.txt, should be 1
> +
> +Example:
> +
> +	dma: dma at 90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500000 0x1000>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};

The binding should really define what the one cell in the dma specifier refers
to. For all I can tell, it is a hardcoded channel number, and each channel
corresponds to exactly one slave request line.

> +static DEFINE_SPINLOCK(dma_lock);

Can't this be part of the device structure? You should not need a global lock here.

> +struct moxart_dma_container {
> +	int			ctlr;
> +	struct dma_device	dma_slave;
> +	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_dma_container *mdc;

Same here. Also, you should never have global identifiers with just three characters.
Most of your 'static' variables are already prefixed "moxart_".

> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	union moxart_dma_reg_cfg mcfg;
> +	unsigned long flags;
> +	unsigned int data_width, data_inc;
> +
> +	spin_lock_irqsave(&dma_lock, flags);
> +
> +	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
> +
> +	mcfg.ul = readl(&mchan->reg->cfg.ul);
> +	mcfg.bits.burst = APB_DMAB_BURST_MODE;
> +
> +	switch (mchan->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		data_width = APB_DMAB_DATA_WIDTH_1;
> +		data_inc = APB_DMAB_DEST_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		data_width = APB_DMAB_DATA_WIDTH_2;
> +		data_inc = APB_DMAB_DEST_INC_2_8;
> +		break;
> +	default:
> +		data_width = APB_DMAB_DATA_WIDTH_4;
> +		data_inc = APB_DMAB_DEST_INC_4_16;
> +		break;
> +	}
> +
> +	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
> +		mcfg.bits.data_width = data_width;
> +		mcfg.bits.dest_sel = APB_DMAB_DEST_APB;
> +		mcfg.bits.dest_inc = APB_DMAB_DEST_INC_0;
> +		mcfg.bits.source_sel = APB_DMAB_SOURCE_AHB;
> +		mcfg.bits.source_inc = data_inc;
> +
> +		mcfg.bits.dest_req_no = mchan->cfg.slave_id;
> +		mcfg.bits.source_req_no = 0;

You must not override the "dest_req_no" and "source_req_no" in moxart_slave_config
since they are already set by the ->xlate() function and the driver calling
slave_config generally has no knowledge of what the slave id is.

> +static struct platform_driver moxart_driver;

Please reorder the symbols so you don't need the forward declaration.

> +bool moxart_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	if (chan->device->dev->driver == &moxart_driver.driver) {

No need to check the driver. What you want to check instead is that
the *device* matches.

> +		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +		unsigned int ch_req = *(unsigned int *)param;
> +		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> +			__func__, mchan, ch_req, mchan->ch_num);
> +		return ch_req == mchan->ch_num;
> +	} else {
> +		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> +			__func__);
> +		return false;
> +	}
> +}
> +EXPORT_SYMBOL(moxart_filter_fn);

Don't export the filter function. No slave driver should rely on this, since you
have DT probing.


> diff --git a/drivers/dma/moxart-dma.h b/drivers/dma/moxart-dma.h
> new file mode 100644
> index 0000000..a37b13f
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.h

You don't need a separate file here, just move the contents into moxart-dma.c

> +union moxart_dma_reg_cfg {
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +#define APB_DMA_SOURCE_AHB			BIT(6)
> +#define APB_DMA_SOURCE_APB			0
> +#define APB_DMA_DEST_AHB			BIT(7)
> +#define APB_DMA_DEST_APB			0
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +#define APB_DMA_SOURCE_INC_MASK			0x700
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +#define APB_DMA_DEST_INC_MASK			0x7000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +	unsigned int ul;
> +
> +	struct {
> +
> +#define APB_DMAB_ENABLE				1
> +		/* enable DMA */
> +		unsigned int enable:1;
> +
> +#define APB_DMAB_FIN_INT_STS			1
> +		/* finished interrupt status */
> +		unsigned int fin_int_sts:1;

The bit numbers don't actually match here if you build the kernel as
big-endian. You cannot use bitfields for hw data structures.

While you are here, get rid of the silly 'BIT' macro use as well.
Using hexadecimal literals is much clearer and you do that for
some fields anyway.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v5] dmaengine: Add MOXA ART DMA engine driver
  2013-07-29 13:44       ` Jonas Jensen
@ 2013-08-02 12:03         ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 12:03 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v4:
    
    1. use DT probing / remove EXPORT_SYMBOL(moxart_filter_fn)
    2. remove struct moxart_dma_reg_cfg
    3. refactor and use hex literals
    4. moxart_dma_filter_fn(): compare device instead of driver
    5. remove moxart-dma.h
    6. move spinlock to moxart_dma_container
    7. use u32 instead of unsigned int (registers)
    8. use platform_get_resource()
    9. remove use of BIT()
    
    device tree bindings document:
    10. describe single cell #dma-cells property
    
    Applies to next-20130802

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 589 +++++++++++++++++++++
 4 files changed, 617 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..9a4db43
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, this is a single cell used to
+		specify	a channel number between 0-3
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..708c238
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,589 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+struct moxart_dma_reg {
+	u32 source_addr;
+	u32 dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	u32 cycles;	/* depend on burst mode */
+	u32 ctrl;
+};
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(&ch->reg->ctrl);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &ch->reg->ctrl);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->cfg.slave_id << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->cfg.slave_id << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+	if (chan->device->dev == mc->dma_slave.dev) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = to_dma_container(devid);
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(&mchan->reg->ctrl);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				pr_debug("%s: call callback for mchan=%p\n",
+					 __func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(ctrl, &mchan->reg->ctrl);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	of_dma_controller_register(node, of_dma_simple_xlate, &moxart_dma_info);
+
+	moxart_dma_irq.dev_id = &mdc->dma_slave;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v5] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 12:03         ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 12:03 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v4:
    
    1. use DT probing / remove EXPORT_SYMBOL(moxart_filter_fn)
    2. remove struct moxart_dma_reg_cfg
    3. refactor and use hex literals
    4. moxart_dma_filter_fn(): compare device instead of driver
    5. remove moxart-dma.h
    6. move spinlock to moxart_dma_container
    7. use u32 instead of unsigned int (registers)
    8. use platform_get_resource()
    9. remove use of BIT()
    
    device tree bindings document:
    10. describe single cell #dma-cells property
    
    Applies to next-20130802

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 589 +++++++++++++++++++++
 4 files changed, 617 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..9a4db43
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, this is a single cell used to
+		specify	a channel number between 0-3
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..708c238
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,589 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+struct moxart_dma_reg {
+	u32 source_addr;
+	u32 dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	u32 cycles;	/* depend on burst mode */
+	u32 ctrl;
+};
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(&ch->reg->ctrl);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &ch->reg->ctrl);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->cfg.slave_id << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->cfg.slave_id << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+	if (chan->device->dev == mc->dma_slave.dev) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = to_dma_container(devid);
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(&mchan->reg->ctrl);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				pr_debug("%s: call callback for mchan=%p\n",
+					 __func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(ctrl, &mchan->reg->ctrl);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	of_dma_controller_register(node, of_dma_simple_xlate, &moxart_dma_info);
+
+	moxart_dma_irq.dev_id = &mdc->dma_slave;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
  2013-07-29 16:35         ` Arnd Bergmann
@ 2013-08-02 12:28           ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 12:28 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, linux

Hi Arnd,

Thanks for the replies. I think what you mention should now be fixed.

Except one thing where I still have questions:

On 29 July 2013 18:35, Arnd Bergmann <arnd@arndb.de> wrote:
> You must not override the "dest_req_no" and "dest_req_no" in moxart_slave_config
> since they are already set by the ->xlate() function and the driver calling
> slave_config generally has no knowledge of what the slave id is.

MMC now has a device tree node:

mmc: mmc@98e00000 {
compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
clocks = <&coreclk>;
dmas = <&dma 0>,
            <&dma 1>;
dma-names = "tx", "rx";
};

.. where the driver requests channel 0-1 and sets cfg.slave_id =
APB_DMA_SD_REQ_NO for both.

Perhaps this is not how slave_id is intended to be used?

Maybe it would be more appropriate to have two DMA cells?

APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 12:28           ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 12:28 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Arnd,

Thanks for the replies. I think what you mention should now be fixed.

Except one thing where I still have questions:

On 29 July 2013 18:35, Arnd Bergmann <arnd@arndb.de> wrote:
> You must not override the "dest_req_no" and "dest_req_no" in moxart_slave_config
> since they are already set by the ->xlate() function and the driver calling
> slave_config generally has no knowledge of what the slave id is.

MMC now has a device tree node:

mmc: mmc@98e00000 {
compatible = "moxa,moxart-mmc";
reg = <0x98e00000 0x5C>;
interrupts = <5 0>;
clocks = <&coreclk>;
dmas = <&dma 0>,
            <&dma 1>;
dma-names = "tx", "rx";
};

.. where the driver requests channel 0-1 and sets cfg.slave_id =
APB_DMA_SD_REQ_NO for both.

Perhaps this is not how slave_id is intended to be used?

Maybe it would be more appropriate to have two DMA cells?

APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
  2013-08-02 12:03         ` Jonas Jensen
@ 2013-08-02 13:28           ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 13:28 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, Jonas Jensen

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Preemptively submitting a new version that has the previously
    mentioned two cell xlate.
    
    Changes since v5:
    
    1. add line request number and use two cell xlate
    
    device tree bindings document:
    2. update description, describe the two cells of #dma-cells
    
    Applies to next-20130802

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 610 +++++++++++++++++++++
 4 files changed, 639 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..dc2b686
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 2
+		cell index 0: channel number between 0-3
+		cell index 1: line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <2>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3ed270f
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,610 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+struct moxart_dma_reg {
+	u32 source_addr;
+	u32 dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	u32 cycles;	/* depends on burst mode */
+	u32 ctrl;
+};
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(&ch->reg->ctrl);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &ch->reg->ctrl);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * (when data_width == APB_DMA_DATA_WIDTH_4)
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+	if (chan->device->dev == mc->dma_slave.dev) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct dma_chan *chan;
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	chan = dma_request_channel(info->dma_cap, info->filter_fn,
+				   &dma_spec->args[0]);
+	if (chan)
+		to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+
+	return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = to_dma_container(devid);
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(&mchan->reg->ctrl);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				pr_debug("%s: call callback for mchan=%p\n",
+					 __func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(ctrl, &mchan->reg->ctrl);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
+
+	moxart_dma_irq.dev_id = &mdc->dma_slave;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 13:28           ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-02 13:28 UTC (permalink / raw)
  To: linux-arm-kernel

Add dmaengine driver for MOXA ART SoCs.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Preemptively submitting a new version that has the previously
    mentioned two cell xlate.
    
    Changes since v5:
    
    1. add line request number and use two cell xlate
    
    device tree bindings document:
    2. update description, describe the two cells of #dma-cells
    
    Applies to next-20130802

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 610 +++++++++++++++++++++
 4 files changed, 639 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..dc2b686
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 2
+		cell index 0: channel number between 0-3
+		cell index 1: line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <2>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3ed270f
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,610 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+struct moxart_dma_reg {
+	u32 source_addr;
+	u32 dest_addr;
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+	u32 cycles;	/* depends on burst mode */
+	u32 ctrl;
+};
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	struct moxart_dma_reg		*reg;
+	void				(*callback)(void *param);
+	void				*callback_param;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(&ch->reg->ctrl);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &ch->reg->ctrl);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->callback = tx->callback;
+	mchan->callback_param = tx->callback_param;
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->reg=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->reg);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->source_addr);
+		writel(mchan->cfg.dst_addr, &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, &mchan->reg->source_addr);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       &mchan->reg->dest_addr);
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * (when data_width == APB_DMA_DATA_WIDTH_4)
+	 */
+	writel(size, &mchan->reg->cycles);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+
+	if (chan->device->dev == mc->dma_slave.dev) {
+		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+		unsigned int ch_req = *(unsigned int *)param;
+		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+			__func__, mchan, ch_req, mchan->ch_num);
+		return ch_req == mchan->ch_num;
+	} else {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct dma_chan *chan;
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	chan = dma_request_channel(info->dma_cap, info->filter_fn,
+				   &dma_spec->args[0]);
+	if (chan)
+		to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+
+	return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	int i;
+	bool found = false;
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
+		if (i == mchan->ch_num
+			&& !mchan->allocated) {
+			dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+				__func__, mchan->ch_num);
+			mchan->allocated = true;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(&mchan->reg->ctrl);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, &mchan->reg->ctrl);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = to_dma_container(devid);
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(&mchan->reg->ctrl);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			if (mchan->callback) {
+				pr_debug("%s: call callback for mchan=%p\n",
+					 __func__, mchan);
+				mchan->callback(mchan->callback_param);
+			}
+			mchan->error_flag = 0;
+			writel(ctrl, &mchan->reg->ctrl);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_dma_irq = {
+	.name       = "moxart-dma-engine",
+	.flags      = IRQF_DISABLED,
+	.handler    = moxart_dma_interrupt,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
+			     + i * sizeof(struct moxart_dma_reg));
+		mchan->callback = NULL;
+		mchan->allocated = 0;
+		mchan->callback_param = NULL;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
+			__func__, i, mchan->ch_num, mchan->reg);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	platform_set_drvdata(pdev, mdc);
+
+	of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
+
+	moxart_dma_irq.dev_id = &mdc->dma_slave;
+	setup_irq(irq, &moxart_dma_irq);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+	dma_async_device_unregister(&m->dma_slave);
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
  2013-08-02 13:28           ` Jonas Jensen
@ 2013-08-02 13:51             ` Russell King - ARM Linux
  -1 siblings, 0 replies; 80+ messages in thread
From: Russell King - ARM Linux @ 2013-08-02 13:51 UTC (permalink / raw)
  To: Jonas Jensen; +Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd

On Fri, Aug 02, 2013 at 03:28:45PM +0200, Jonas Jensen wrote:
> +struct moxart_dma_chan {
> +	struct dma_chan			chan;
> +	int				ch_num;
> +	bool				allocated;
> +	int				error_flag;
> +	struct moxart_dma_reg		*reg;
> +	void				(*callback)(void *param);
> +	void				*callback_param;
> +	struct completion		dma_complete;

Is this completion used anywhere?

> +	struct dma_slave_config		cfg;
> +	struct dma_async_tx_descriptor	tx_desc;
> +	unsigned int			line;
> +};
...
> +static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	dma_cookie_t cookie;
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	mchan->callback = tx->callback;
> +	mchan->callback_param = tx->callback_param;

As 'mchan' contains the tx descriptor, I don't know why you feel that you
need to copy these.

> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dma_container *mc = to_dma_container(devid);
> +	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
> +	unsigned int i;
> +	u32 ctrl;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		if (mchan->allocated) {
> +			ctrl = readl(&mchan->reg->ctrl);
> +			if (ctrl & APB_DMA_FIN_INT_STS) {
> +				ctrl &= ~APB_DMA_FIN_INT_STS;
> +				dma_cookie_complete(&mchan->tx_desc);
> +			}
> +			if (ctrl & APB_DMA_ERR_INT_STS) {
> +				ctrl &= ~APB_DMA_ERR_INT_STS;
> +				mchan->error_flag = 1;
> +			}
> +			if (mchan->callback) {
> +				pr_debug("%s: call callback for mchan=%p\n",
> +					 __func__, mchan);
> +				mchan->callback(mchan->callback_param);

Calling the callback from interrupt context is not on.

2/ Specify a completion callback.  The callback routine runs in tasklet
   context if the offload engine driver supports interrupts, or it is
   called in application context if the operation is carried out
   synchronously in software.

That can be found in Documentation/crypto/async-tx-api.txt and applies
to all DMA engine implementations (which is the underlying implementation
of the async-tx API.)

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 13:51             ` Russell King - ARM Linux
  0 siblings, 0 replies; 80+ messages in thread
From: Russell King - ARM Linux @ 2013-08-02 13:51 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Aug 02, 2013 at 03:28:45PM +0200, Jonas Jensen wrote:
> +struct moxart_dma_chan {
> +	struct dma_chan			chan;
> +	int				ch_num;
> +	bool				allocated;
> +	int				error_flag;
> +	struct moxart_dma_reg		*reg;
> +	void				(*callback)(void *param);
> +	void				*callback_param;
> +	struct completion		dma_complete;

Is this completion used anywhere?

> +	struct dma_slave_config		cfg;
> +	struct dma_async_tx_descriptor	tx_desc;
> +	unsigned int			line;
> +};
...
> +static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	dma_cookie_t cookie;
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	mchan->callback = tx->callback;
> +	mchan->callback_param = tx->callback_param;

As 'mchan' contains the tx descriptor, I don't know why you feel that you
need to copy these.

> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dma_container *mc = to_dma_container(devid);
> +	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
> +	unsigned int i;
> +	u32 ctrl;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		if (mchan->allocated) {
> +			ctrl = readl(&mchan->reg->ctrl);
> +			if (ctrl & APB_DMA_FIN_INT_STS) {
> +				ctrl &= ~APB_DMA_FIN_INT_STS;
> +				dma_cookie_complete(&mchan->tx_desc);
> +			}
> +			if (ctrl & APB_DMA_ERR_INT_STS) {
> +				ctrl &= ~APB_DMA_ERR_INT_STS;
> +				mchan->error_flag = 1;
> +			}
> +			if (mchan->callback) {
> +				pr_debug("%s: call callback for mchan=%p\n",
> +					 __func__, mchan);
> +				mchan->callback(mchan->callback_param);

Calling the callback from interrupt context is not on.

2/ Specify a completion callback.  The callback routine runs in tasklet
   context if the offload engine driver supports interrupts, or it is
   called in application context if the operation is carried out
   synchronously in software.

That can be found in Documentation/crypto/async-tx-api.txt and applies
to all DMA engine implementations (which is the underlying implementation
of the async-tx API.)

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
  2013-08-02 13:28           ` Jonas Jensen
@ 2013-08-02 14:09             ` Mark Rutland
  -1 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-02 14:09 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux, arnd, vinod.koul, linux-kernel, arm, djbw

On Fri, Aug 02, 2013 at 02:28:45PM +0100, Jonas Jensen wrote:
> Add dmaengine driver for MOXA ART SoCs.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Preemptively submitting a new version that has the previously
>     mentioned two cell xlate.
> 
>     Changes since v5:
> 
>     1. add line request number and use two cell xlate
> 
>     device tree bindings document:
>     2. update description, describe the two cells of #dma-cells
> 
>     Applies to next-20130802
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 610 +++++++++++++++++++++
>  4 files changed, 639 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..dc2b686
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> +               cell index 0: channel number between 0-3
> +               cell index 1: line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;

This should be #dma-cells = <2>;

[...]

> +struct moxart_dma_reg {
> +       u32 source_addr;
> +       u32 dest_addr;
> +#define APB_DMA_CYCLES_MASK                    0x00ffffff
> +       u32 cycles;     /* depend on burst mode */
> +       u32 ctrl;
> +};

I'm not keen on relying on structs for register offsets, but at least
they're exact width u32s.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +       int i;
> +       bool found = false;
> +
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> +               if (i == mchan->ch_num
> +                       && !mchan->allocated) {
> +                       dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> +                               __func__, mchan->ch_num);
> +                       mchan->allocated = true;
> +                       found = true;

Why not return 0 here...

> +                       break;
> +               }
> +       }


...and always return -ENODEV here?

That way you can also get rid of the found variable.

> +
> +       if (!found)
> +               return -ENODEV;
> +
> +       return 0;
> +}

[...]

> +static struct irqaction moxart_dma_irq = {
> +       .name       = "moxart-dma-engine",
> +       .flags      = IRQF_DISABLED,
> +       .handler    = moxart_dma_interrupt,
> +};
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct device_node *node = dev->of_node;
> +       struct resource *res;
> +       static void __iomem *dma_base_addr;
> +       int ret, i;
> +       unsigned int irq;
> +       struct moxart_dma_chan *mchan;
> +       struct moxart_dma_container *mdc;
> +
> +       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +       if (!mdc) {
> +               dev_err(dev, "can't allocate DMA container\n");
> +               return -ENOMEM;
> +       }
> +
> +       irq = irq_of_parse_and_map(node, 0);

What if this fails (where irq == 0)?.

> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       dma_base_addr = devm_ioremap_resource(dev, res);
> +       if (IS_ERR(dma_base_addr)) {
> +               dev_err(dev, "devm_ioremap_resource failed\n");
> +               return PTR_ERR(dma_base_addr);
> +       }
> +
> +       mdc->ctlr = pdev->id;
> +       spin_lock_init(&mdc->dma_lock);
> +
> +       dma_cap_zero(mdc->dma_slave.cap_mask);
> +       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +       moxart_dma_init(&mdc->dma_slave, dev);
> +
> +       mchan = &mdc->slave_chans[0];
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +               mchan->ch_num = i;
> +               mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> +                            + i * sizeof(struct moxart_dma_reg));
> +               mchan->callback = NULL;
> +               mchan->allocated = 0;
> +               mchan->callback_param = NULL;
> +
> +               dma_cookie_init(&mchan->chan);
> +               mchan->chan.device = &mdc->dma_slave;
> +               list_add_tail(&mchan->chan.device_node,
> +                             &mdc->dma_slave.channels);
> +
> +               dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
> +                       __func__, i, mchan->ch_num, mchan->reg);
> +       }
> +
> +       ret = dma_async_device_register(&mdc->dma_slave);
> +       platform_set_drvdata(pdev, mdc);
> +
> +       of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> +       moxart_dma_irq.dev_id = &mdc->dma_slave;
> +       setup_irq(irq, &moxart_dma_irq);

What if this fails?

Is there any reason you can't use request_irq over setup_irq?

> +
> +       dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> +       return ret;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v6] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 14:09             ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-02 14:09 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Aug 02, 2013 at 02:28:45PM +0100, Jonas Jensen wrote:
> Add dmaengine driver for MOXA ART SoCs.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Preemptively submitting a new version that has the previously
>     mentioned two cell xlate.
> 
>     Changes since v5:
> 
>     1. add line request number and use two cell xlate
> 
>     device tree bindings document:
>     2. update description, describe the two cells of #dma-cells
> 
>     Applies to next-20130802
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 610 +++++++++++++++++++++
>  4 files changed, 639 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..dc2b686
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> +               cell index 0: channel number between 0-3
> +               cell index 1: line request number
> +
> +Example:
> +
> +       dma: dma at 90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;

This should be #dma-cells = <2>;

[...]

> +struct moxart_dma_reg {
> +       u32 source_addr;
> +       u32 dest_addr;
> +#define APB_DMA_CYCLES_MASK                    0x00ffffff
> +       u32 cycles;     /* depend on burst mode */
> +       u32 ctrl;
> +};

I'm not keen on relying on structs for register offsets, but at least
they're exact width u32s.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +       int i;
> +       bool found = false;
> +
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> +               if (i == mchan->ch_num
> +                       && !mchan->allocated) {
> +                       dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> +                               __func__, mchan->ch_num);
> +                       mchan->allocated = true;
> +                       found = true;

Why not return 0 here...

> +                       break;
> +               }
> +       }


...and always return -ENODEV here?

That way you can also get rid of the found variable.

> +
> +       if (!found)
> +               return -ENODEV;
> +
> +       return 0;
> +}

[...]

> +static struct irqaction moxart_dma_irq = {
> +       .name       = "moxart-dma-engine",
> +       .flags      = IRQF_DISABLED,
> +       .handler    = moxart_dma_interrupt,
> +};
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct device_node *node = dev->of_node;
> +       struct resource *res;
> +       static void __iomem *dma_base_addr;
> +       int ret, i;
> +       unsigned int irq;
> +       struct moxart_dma_chan *mchan;
> +       struct moxart_dma_container *mdc;
> +
> +       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +       if (!mdc) {
> +               dev_err(dev, "can't allocate DMA container\n");
> +               return -ENOMEM;
> +       }
> +
> +       irq = irq_of_parse_and_map(node, 0);

What if this fails (where irq == 0)?.

> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       dma_base_addr = devm_ioremap_resource(dev, res);
> +       if (IS_ERR(dma_base_addr)) {
> +               dev_err(dev, "devm_ioremap_resource failed\n");
> +               return PTR_ERR(dma_base_addr);
> +       }
> +
> +       mdc->ctlr = pdev->id;
> +       spin_lock_init(&mdc->dma_lock);
> +
> +       dma_cap_zero(mdc->dma_slave.cap_mask);
> +       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +       moxart_dma_init(&mdc->dma_slave, dev);
> +
> +       mchan = &mdc->slave_chans[0];
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +               mchan->ch_num = i;
> +               mchan->reg = (struct moxart_dma_reg *)(dma_base_addr + 0x80
> +                            + i * sizeof(struct moxart_dma_reg));
> +               mchan->callback = NULL;
> +               mchan->allocated = 0;
> +               mchan->callback_param = NULL;
> +
> +               dma_cookie_init(&mchan->chan);
> +               mchan->chan.device = &mdc->dma_slave;
> +               list_add_tail(&mchan->chan.device_node,
> +                             &mdc->dma_slave.channels);
> +
> +               dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->reg=%p\n",
> +                       __func__, i, mchan->ch_num, mchan->reg);
> +       }
> +
> +       ret = dma_async_device_register(&mdc->dma_slave);
> +       platform_set_drvdata(pdev, mdc);
> +
> +       of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> +       moxart_dma_irq.dev_id = &mdc->dma_slave;
> +       setup_irq(irq, &moxart_dma_irq);

What if this fails?

Is there any reason you can't use request_irq over setup_irq?

> +
> +       dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> +       return ret;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
  2013-08-02 12:28           ` Jonas Jensen
@ 2013-08-02 19:28             ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-02 19:28 UTC (permalink / raw)
  To: Jonas Jensen; +Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, linux

On Friday 02 August 2013 14:28:28 Jonas Jensen wrote:
> 
> On 29 July 2013 18:35, Arnd Bergmann <arnd@arndb.de> wrote:
> > You must not override the "dest_req_no" and "dest_req_no" in moxart_slave_config
> > since they are already set by the ->xlate() function and the driver calling
> > slave_config generally has no knowledge of what the slave id is.
> 
> MMC now has a device tree node:
> 
> mmc: mmc@98e00000 {
> compatible = "moxa,moxart-mmc";
> reg = <0x98e00000 0x5C>;
> interrupts = <5 0>;
> clocks = <&coreclk>;
> dmas = <&dma 0>,
>             <&dma 1>;
> dma-names = "tx", "rx";
> };
> 
> .. where the driver requests channel 0-1 and sets cfg.slave_id =
> APB_DMA_SD_REQ_NO for both.
> 
> Perhaps this is not how slave_id is intended to be used?
> 
> Maybe it would be more appropriate to have two DMA cells?
> 
> APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

In most drivers, you can use any channel with any request line number
and let the dmaengine driver pick a channel while you pass just the
request line (slave id) in a single cell in DT. If this does not
work, using two cells is the best approach here.

Removing APB_DMA_SD_REQ_NO from the driver code is definitely the
right approach, since that number is not something specific to the
device, but to the way it is connected to the DMA engine, which
belongs into DT.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v4] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-02 19:28             ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-02 19:28 UTC (permalink / raw)
  To: linux-arm-kernel

On Friday 02 August 2013 14:28:28 Jonas Jensen wrote:
> 
> On 29 July 2013 18:35, Arnd Bergmann <arnd@arndb.de> wrote:
> > You must not override the "dest_req_no" and "dest_req_no" in moxart_slave_config
> > since they are already set by the ->xlate() function and the driver calling
> > slave_config generally has no knowledge of what the slave id is.
> 
> MMC now has a device tree node:
> 
> mmc: mmc at 98e00000 {
> compatible = "moxa,moxart-mmc";
> reg = <0x98e00000 0x5C>;
> interrupts = <5 0>;
> clocks = <&coreclk>;
> dmas = <&dma 0>,
>             <&dma 1>;
> dma-names = "tx", "rx";
> };
> 
> .. where the driver requests channel 0-1 and sets cfg.slave_id =
> APB_DMA_SD_REQ_NO for both.
> 
> Perhaps this is not how slave_id is intended to be used?
> 
> Maybe it would be more appropriate to have two DMA cells?
> 
> APB_DMA_SD_REQ_NO can then be moved from driver code to DT.

In most drivers, you can use any channel with any request line number
and let the dmaengine driver pick a channel while you pass just the
request line (slave id) in a single cell in DT. If this does not
work, using two cells is the best approach here.

Removing APB_DMA_SD_REQ_NO from the driver code is definitely the
right approach, since that number is not something specific to the
device, but to the way it is connected to the DMA engine, which
belongs into DT.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
  2013-08-02 13:28           ` Jonas Jensen
@ 2013-08-05 14:37             ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-05 14:37 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies.
    
    Changes since v6:
    
    1. move callback from interrupt context to tasklet
    2. remove callback and callback_param, use those provided by tx_desc
    3. don't rely on structs for register offsets
    4. remove local bool "found" variable from moxart_alloc_chan_resources()
    5. check return value of irq_of_parse_and_map
    6. use devm_request_irq instead of setup_irq
    7. elaborate commit message
    
    device tree bindings document:
    8. in the example, change "#dma-cells" to "<2>"
    
    Applies to next-20130805

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
 4 files changed, 643 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..5b9f82c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 2
+		cell index 0: channel number between 0-3
+		cell index 1: line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <2>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..7160cc3
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	void __iomem			*base;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+	struct tasklet_struct	tasklet;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program one slave transfer: memory address from the scatterlist,
+ * peripheral address and bus width from the cached slave config.
+ *
+ * NOTE(review): only sgl[0] is programmed; sg_len > 1 is silently
+ * ignored.  The hardware exposes a single source/destination register
+ * pair per channel, so multi-entry lists would need software chaining.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		/*
+		 * sg_dma_address() already yields a bus address after
+		 * dma_map_sg(); running it through virt_to_phys() again
+		 * (as the original did) double-translates the address.
+		 */
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	/* Cycle count is the byte length scaled down by the bus width. */
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* Single static descriptor per channel; submit enables interrupts. */
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * dma_request_channel() filter: accept only a channel that belongs to
+ * this controller and whose number matches the requested one.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned int ch_req;
+
+	/* Channels of other DMA engines also pass through this filter. */
+	if (chan->device->dev != mc->dma_slave.dev) {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+
+	/*
+	 * param points at dma_spec->args[0] (channel number).  The inner
+	 * redeclaration of mchan in the original shadowed the outer one.
+	 */
+	ch_req = *(unsigned int *)param;
+	dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+		__func__, mchan, ch_req, mchan->ch_num);
+	return ch_req == mchan->ch_num;
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct dma_chan *chan;
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	chan = dma_request_channel(info->dma_cap, info->filter_fn,
+				   &dma_spec->args[0]);
+	if (chan)
+		to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+
+	return chan;
+}
+
+/*
+ * Claim the fixed hardware channel backing this dma_chan.  Returns
+ * -ENODEV if the channel is already in use.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	/*
+	 * The original loop over APB_DMA_MAX_CHANNEL only ever matched
+	 * i == mchan->ch_num, so it reduces to this single check.
+	 */
+	if (mchan->allocated)
+		return -ENODEV;
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	mchan->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+/*
+ * Report transfer status.  Completion is tracked purely through the
+ * dmaengine cookie (dma_cookie_complete() in the interrupt handler)
+ * and no residue is reported, so dma_cookie_status() is the whole
+ * answer.  The original "if (ret == DMA_SUCCESS || !txstate)" branch
+ * was dead code: both paths returned the same value.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated && ch->tx_desc.callback) {
+			pr_debug("%s: call callback for ch=%p\n",
+				 __func__, ch);
+			ch->tx_desc.callback(ch->tx_desc.callback_param);
+		}
+	}
+}
+
+/*
+ * Shared interrupt handler: ack finish/error status on every allocated
+ * channel, record errors, complete cookies, and defer the client
+ * callbacks to the tasklet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (!mchan->allocated)
+			continue;
+
+		ctrl = readl(mchan->base + REG_CTRL);
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			dma_cookie_complete(&mchan->tx_desc);
+		}
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			/*
+			 * Keep the flag set: the original unconditionally
+			 * wrote error_flag = 0 right after this, clobbering
+			 * the error it had just recorded.
+			 */
+			mchan->error_flag = 1;
+		}
+		/* Write back CTRL with the serviced status bits cleared. */
+		writel(ctrl, mchan->base + REG_CTRL);
+	}
+
+	tasklet_schedule(&mc->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map registers, initialise the four channels, hook up the
+ * interrupt, and register with the dmaengine core and the DT DMA
+ * helpers.  All failure paths unwind what was set up before them.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;	/* was bogusly 'static' before */
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/* irq is unsigned: 0 is the only failure value. */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		/* Channel register windows start at offset 0x80. */
+		mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+		mchan->allocated = false;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	/* The tasklet must exist before the first interrupt can fire. */
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		goto err_tasklet;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		goto err_tasklet;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate,
+					 &moxart_dma_info);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(&mdc->dma_slave);
+err_tasklet:
+	tasklet_kill(&mdc->tasklet);
+	return ret;
+}
+
+/* Undo probe in reverse order: DT hook, tasklet, then dmaengine. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+	/* The original leaked the of_dma registration and live tasklet. */
+	of_dma_controller_free(pdev->dev.of_node);
+	tasklet_kill(&m->tasklet);
+	dma_async_device_unregister(&m->dma_slave);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-05 14:37             ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-05 14:37 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies.
    
    Changes since v6:
    
    1. move callback from interrupt context to tasklet
    2. remove callback and callback_param, use those provided by tx_desc
    3. don't rely on structs for register offsets
    4. remove local bool "found" variable from moxart_alloc_chan_resources()
    5. check return value of irq_of_parse_and_map
    6. use devm_request_irq instead of setup_irq
    7. elaborate commit message
    
    device tree bindings document:
    8. in the example, change "#dma-cells" to "<2>"
    
    Applies to next-20130805

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
 4 files changed, 643 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..5b9f82c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,21 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 2
+		cell index 0: channel number between 0-3
+		cell index 1: line request number
+
+Example:
+
+	dma: dma at 90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <2>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..7160cc3
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+/* Per-channel state; each channel owns a REG_CHAN_SIZE register window. */
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;	/* hardware channel index, 0..3 */
+	bool				allocated;	/* claimed via alloc_chan_resources */
+	int				error_flag;	/* set when APB_DMA_ERR_INT_STS seen */
+	void __iomem			*base;	/* channel register base */
+	struct completion		dma_complete;	/* NOTE(review): unused in this file */
+	struct dma_slave_config		cfg;	/* cached by moxart_slave_config() */
+	struct dma_async_tx_descriptor	tx_desc;	/* single in-flight descriptor */
+	unsigned int			line;	/* request line from DT cell 1 */
+};
+
+/* Controller state: the dmaengine device plus its four channels. */
+struct moxart_dma_container {
+	int			ctlr;	/* platform device id */
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;	/* serializes CTRL read-modify-write cycles */
+	struct tasklet_struct	tasklet;	/* runs completion callbacks */
+};
+
+/* Convenience: the struct device to use for dev_dbg() on a channel. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Map a dma_device back to the containing controller state. */
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Map a generic dma_chan to the driver's per-channel state. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/* Stop a channel: clear enable and mask both interrupt sources. */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, mchan);
+
+	/* CTRL is read-modify-write, so take the engine-wide lock. */
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	writel(ctrl & ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN),
+	       mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Configure a channel from a dma_slave_config: the bus width selects
+ * both the DMA data width and the address-increment step, and the
+ * transfer direction selects which side (source or destination) is the
+ * AHB memory side vs. the APB peripheral side.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	/* Cache the config; prep_slave_sg() reads addresses/widths from it. */
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	/* Always burst; clear stale increment and request-line fields. */
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/*
+	 * Only the memory (AHB) side of the transfer is incremented; the
+	 * peripheral side keeps a fixed address.
+	 * NOTE(review): the width is always taken from src_addr_width,
+	 * even for DEV_TO_MEM — confirm this is intended.
+	 */
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		/* 4-byte transfers: WIDTH field cleared selects word size. */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	/*
+	 * Route the handshake request line to the peripheral side and
+	 * mark the memory side as AHB (SELECT bit set).
+	 */
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control hook: dispatch the two commands this driver
+ * implements; everything else is unsupported.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		return 0;
+	case DMA_SLAVE_CONFIG:
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+	default:
+		return -ENOSYS;
+	}
+}
+
+/*
+ * Submit the (single, static) descriptor: assign a cookie and unmask
+ * the channel's finish/error interrupts.  The channel itself is only
+ * enabled later, in moxart_issue_pending().
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	/* Fresh transfer: forget any error from the previous one. */
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%d mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* Enable interrupt delivery; APB_DMA_ENABLE is set by issue_pending. */
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program one slave transfer: memory address from the scatterlist,
+ * peripheral address and bus width from the cached slave config.
+ *
+ * NOTE(review): only sgl[0] is programmed; sg_len > 1 is silently
+ * ignored.  The hardware exposes a single source/destination register
+ * pair per channel, so multi-entry lists would need software chaining.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		/*
+		 * sg_dma_address() already yields a bus address after
+		 * dma_map_sg(); running it through virt_to_phys() again
+		 * (as the original did) double-translates the address.
+		 */
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	/* Cycle count is the byte length scaled down by the bus width. */
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %d DMA cycles (sgl->length=%d adr_width=%d)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* Single static descriptor per channel; submit enables interrupts. */
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * dma_request_channel() filter: accept only a channel that belongs to
+ * this controller and whose number matches the requested one.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned int ch_req;
+
+	/* Channels of other DMA engines also pass through this filter. */
+	if (chan->device->dev != mc->dma_slave.dev) {
+		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
+			__func__);
+		return false;
+	}
+
+	/*
+	 * param points at dma_spec->args[0] (channel number).  The inner
+	 * redeclaration of mchan in the original shadowed the outer one.
+	 */
+	ch_req = *(unsigned int *)param;
+	dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
+		__func__, mchan, ch_req, mchan->ch_num);
+	return ch_req == mchan->ch_num;
+}
+
+static struct of_dma_filter_info moxart_dma_info = {
+	.filter_fn = moxart_dma_filter_fn,
+};
+
+/*
+ * DT translation: cell 0 is the channel number (matched by the filter),
+ * cell 1 is the hardware request line stored on the channel.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct of_dma_filter_info *info = ofdma->of_dma_data;
+	struct dma_chan *chan;
+
+	/* Expect a valid filter and exactly two cells (#dma-cells = <2>). */
+	if (!info || !info->filter_fn || dma_spec->args_count != 2)
+		return NULL;
+
+	chan = dma_request_channel(info->dma_cap, info->filter_fn,
+				   &dma_spec->args[0]);
+	if (!chan)
+		return NULL;
+
+	to_moxart_dma_chan(chan)->line = dma_spec->args[1];
+	return chan;
+}
+
+/*
+ * Claim the fixed hardware channel backing this dma_chan.  Returns
+ * -ENODEV if the channel is already in use.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	/*
+	 * The original loop over APB_DMA_MAX_CHANNEL only ever matched
+	 * i == mchan->ch_num, so it reduces to this single check.
+	 */
+	if (mchan->allocated)
+		return -ENODEV;
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/* Release a channel: just mark it available; no hardware teardown. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+
+	ch->allocated = false;
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+}
+
+/* Start the transfer programmed by prep_slave_sg()/tx_submit(). */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *ctlr = to_dma_container(ch->chan.device);
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, ch);
+
+	/* Set the enable bit under the lock guarding CTRL updates. */
+	spin_lock_irqsave(&ctlr->dma_lock, flags);
+	writel(readl(ch->base + REG_CTRL) | APB_DMA_ENABLE,
+	       ch->base + REG_CTRL);
+	spin_unlock_irqrestore(&ctlr->dma_lock, flags);
+}
+
+/*
+ * Report transfer status.  Completion is tracked purely through the
+ * dmaengine cookie (dma_cookie_complete() in the interrupt handler)
+ * and no residue is reported, so dma_cookie_status() is the whole
+ * answer.  The original "if (ret == DMA_SUCCESS || !txstate)" branch
+ * was dead code: both paths returned the same value.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/*
+ * Fill in the dma_device callbacks.  This controller only implements
+ * slave (device<->memory) transfers; capabilities are set by the caller.
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Bottom half: run the client completion callbacks outside hard-irq
+ * context (scheduled from moxart_dma_interrupt()).
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+
+	pr_debug("%s\n", __func__);
+
+	/*
+	 * NOTE(review): the callback fires for every allocated channel
+	 * whenever any channel interrupts, so a channel whose transfer is
+	 * still in flight can get a spurious callback — confirm whether a
+	 * per-channel completion flag is needed here.
+	 */
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated && ch->tx_desc.callback) {
+			pr_debug("%s: call callback for ch=%p\n",
+				 __func__, ch);
+			ch->tx_desc.callback(ch->tx_desc.callback_param);
+		}
+	}
+}
+
+/*
+ * Shared interrupt handler: ack finish/error status on every allocated
+ * channel, record errors, complete cookies, and defer the client
+ * callbacks to the tasklet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (!mchan->allocated)
+			continue;
+
+		ctrl = readl(mchan->base + REG_CTRL);
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			dma_cookie_complete(&mchan->tx_desc);
+		}
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			/*
+			 * Keep the flag set: the original unconditionally
+			 * wrote error_flag = 0 right after this, clobbering
+			 * the error it had just recorded.
+			 */
+			mchan->error_flag = 1;
+		}
+		/* Write back CTRL with the serviced status bits cleared. */
+		writel(ctrl, mchan->base + REG_CTRL);
+	}
+
+	tasklet_schedule(&mc->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map registers, initialise the four channels, hook up the
+ * interrupt, and register with the dmaengine core and the DT DMA
+ * helpers.  All failure paths unwind what was set up before them.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;	/* was bogusly 'static' before */
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/* irq is unsigned: 0 is the only failure value. */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		/* Channel register windows start at offset 0x80. */
+		mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+		mchan->allocated = false;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	/* The tasklet must exist before the first interrupt can fire. */
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		goto err_tasklet;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		goto err_tasklet;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate,
+					 &moxart_dma_info);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(&mdc->dma_slave);
+err_tasklet:
+	tasklet_kill(&mdc->tasklet);
+	return ret;
+}
+
+/* Undo probe in reverse order: DT hook, tasklet, then dmaengine. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+	/* The original leaked the of_dma registration and live tasklet. */
+	of_dma_controller_free(pdev->dev.of_node);
+	tasklet_kill(&m->tasklet);
+	dma_async_device_unregister(&m->dma_slave);
+
+	return 0;
+}
+
+/* OF match table; binding documented in moxa,moxart-dma.txt. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/* Registered at subsys_initcall time so client drivers can probe later. */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
  2013-08-05 14:37             ` Jonas Jensen
@ 2013-08-05 16:57               ` Mark Rutland
  -1 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-05 16:57 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd, linux

On Mon, Aug 05, 2013 at 03:37:37PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
>
> Notes:
>     Thanks for the replies.
>
>     Changes since v6:
>
>     1. move callback from interrupt context to tasklet
>     2. remove callback and callback_param, use those provided by tx_desc
>     3. don't rely on structs for register offsets
>     4. remove local bool "found" variable from moxart_alloc_chan_resources()
>     5. check return value of irq_of_parse_and_map
>     6. use devm_request_irq instead of setup_irq
>     7. elaborate commit message
>
>     device tree bindings document:
>     8. in the example, change "#dma-cells" to "<2>"
>
>     Applies to next-20130805
>
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
>  4 files changed, 643 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..5b9f82c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> +               cell index 0: channel number between 0-3
> +               cell index 1: line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <2>;
> +       };

Thanks for the updates on this. :)

The binding and example look sensible to me; it would be nice if someone
familiar with the dma subsystem could check that this has the necessary
information.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +       int i;
> +
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> +               if (i == mchan->ch_num
> +                       && !mchan->allocated) {
> +                       dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> +                               __func__, mchan->ch_num);
> +                       mchan->allocated = true;
> +                       return 0;
> +               }
> +       }

Come to think of it, why do you need to iterate over all of the channels
to handle a particular channel number that you already know, and already
have the struct for?

I'm not familiar with the dma subsystem, and I couldn't spot when the
dma channel is actually assigned/selected prior to this.

[...]

> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +                                       dma_cookie_t cookie,
> +                                       struct dma_tx_state *txstate)
> +{
> +       enum dma_status ret;
> +
> +       ret = dma_cookie_status(chan, cookie, txstate);
> +       if (ret == DMA_SUCCESS || !txstate)
> +               return ret;
> +
> +       return ret;

No special status handling?

This function is equivalent to:

        return dma_cookie_status(chan, cookie, txstate);

[...]

> +static int moxart_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct device_node *node = dev->of_node;
> +       struct resource *res;
> +       static void __iomem *dma_base_addr;
> +       int ret, i;
> +       unsigned int irq;
> +       struct moxart_dma_chan *mchan;
> +       struct moxart_dma_container *mdc;
> +
> +       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +       if (!mdc) {
> +               dev_err(dev, "can't allocate DMA container\n");
> +               return -ENOMEM;
> +       }
> +
> +       irq = irq_of_parse_and_map(node, 0);
> +       if (irq <= 0) {
> +               dev_err(dev, "irq_of_parse_and_map failed\n");
> +               return -EINVAL;
> +       }
> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       dma_base_addr = devm_ioremap_resource(dev, res);
> +       if (IS_ERR(dma_base_addr)) {
> +               dev_err(dev, "devm_ioremap_resource failed\n");
> +               return PTR_ERR(dma_base_addr);
> +       }
> +
> +       mdc->ctlr = pdev->id;
> +       spin_lock_init(&mdc->dma_lock);
> +
> +       dma_cap_zero(mdc->dma_slave.cap_mask);
> +       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +       moxart_dma_init(&mdc->dma_slave, dev);
> +
> +       mchan = &mdc->slave_chans[0];
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +               mchan->ch_num = i;
> +               mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
> +               mchan->allocated = 0;
> +
> +               dma_cookie_init(&mchan->chan);
> +               mchan->chan.device = &mdc->dma_slave;
> +               list_add_tail(&mchan->chan.device_node,
> +                             &mdc->dma_slave.channels);
> +
> +               dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
> +                       __func__, i, mchan->ch_num, mchan->base);
> +       }
> +
> +       ret = dma_async_device_register(&mdc->dma_slave);

What if this fails?

> +       platform_set_drvdata(pdev, mdc);
> +
> +       of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> +       tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> +       devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +                        "moxart-dma-engine", mdc);

The return value of devm_request_irq should be checked; it might fail.

> +
> +       dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> +       return ret;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-05 16:57               ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-05 16:57 UTC (permalink / raw)
  To: linux-arm-kernel

On Mon, Aug 05, 2013 at 03:37:37PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
>
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
>
> Notes:
>     Thanks for the replies.
>
>     Changes since v6:
>
>     1. move callback from interrupt context to tasklet
>     2. remove callback and callback_param, use those provided by tx_desc
>     3. don't rely on structs for register offsets
>     4. remove local bool "found" variable from moxart_alloc_chan_resources()
>     5. check return value of irq_of_parse_and_map
>     6. use devm_request_irq instead of setup_irq
>     7. elaborate commit message
>
>     device tree bindings document:
>     8. in the example, change "#dma-cells" to "<2>"
>
>     Applies to next-20130805
>
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  21 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
>  4 files changed, 643 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
>
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..5b9f82c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,21 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 2
> +               cell index 0: channel number between 0-3
> +               cell index 1: line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <2>;
> +       };

Thanks for the updates on this. :)

The binding and example look sensible to me; it would be nice if someone
familiar with the dma subsystem could check that this has the necessary
information.

[...]

> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +       int i;
> +
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++) {
> +               if (i == mchan->ch_num
> +                       && !mchan->allocated) {
> +                       dev_dbg(chan2dev(chan), "%s: allocating channel #%d\n",
> +                               __func__, mchan->ch_num);
> +                       mchan->allocated = true;
> +                       return 0;
> +               }
> +       }

Come to think of it, why do you need to iterate over all of the channels
to handle a particular channel number that you already know, and already
have the struct for?

I'm not familiar with the dma subsystem, and I couldn't spot when the
dma channel is actually assigned/selected prior to this.

[...]

> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +                                       dma_cookie_t cookie,
> +                                       struct dma_tx_state *txstate)
> +{
> +       enum dma_status ret;
> +
> +       ret = dma_cookie_status(chan, cookie, txstate);
> +       if (ret == DMA_SUCCESS || !txstate)
> +               return ret;
> +
> +       return ret;

No special status handling?

This function is equivalent to:

        return dma_cookie_status(chan, cookie, txstate);

[...]

> +static int moxart_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct device_node *node = dev->of_node;
> +       struct resource *res;
> +       static void __iomem *dma_base_addr;
> +       int ret, i;
> +       unsigned int irq;
> +       struct moxart_dma_chan *mchan;
> +       struct moxart_dma_container *mdc;
> +
> +       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +       if (!mdc) {
> +               dev_err(dev, "can't allocate DMA container\n");
> +               return -ENOMEM;
> +       }
> +
> +       irq = irq_of_parse_and_map(node, 0);
> +       if (irq <= 0) {
> +               dev_err(dev, "irq_of_parse_and_map failed\n");
> +               return -EINVAL;
> +       }
> +
> +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +       dma_base_addr = devm_ioremap_resource(dev, res);
> +       if (IS_ERR(dma_base_addr)) {
> +               dev_err(dev, "devm_ioremap_resource failed\n");
> +               return PTR_ERR(dma_base_addr);
> +       }
> +
> +       mdc->ctlr = pdev->id;
> +       spin_lock_init(&mdc->dma_lock);
> +
> +       dma_cap_zero(mdc->dma_slave.cap_mask);
> +       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +       moxart_dma_init(&mdc->dma_slave, dev);
> +
> +       mchan = &mdc->slave_chans[0];
> +       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +               mchan->ch_num = i;
> +               mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
> +               mchan->allocated = 0;
> +
> +               dma_cookie_init(&mchan->chan);
> +               mchan->chan.device = &mdc->dma_slave;
> +               list_add_tail(&mchan->chan.device_node,
> +                             &mdc->dma_slave.channels);
> +
> +               dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%d mchan->base=%p\n",
> +                       __func__, i, mchan->ch_num, mchan->base);
> +       }
> +
> +       ret = dma_async_device_register(&mdc->dma_slave);

What if this fails?

> +       platform_set_drvdata(pdev, mdc);
> +
> +       of_dma_controller_register(node, moxart_of_xlate, &moxart_dma_info);
> +
> +       tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> +       devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +                        "moxart-dma-engine", mdc);

The return value of devm_request_irq should be checked; it might fail.

> +
> +       dev_dbg(dev, "%s: IRQ=%d\n", __func__, irq);
> +
> +       return ret;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
  2013-08-05 14:37             ` Jonas Jensen
@ 2013-08-05 20:49               ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-05 20:49 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, linux,
	mark.rutland

On Monday 05 August 2013, Jonas Jensen wrote:

> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +
> +	if (chan->device->dev == mc->dma_slave.dev) {

This comparison seems rather pointless -- you only check that the
device owning the channel is the same as the device that belongs
to channel's "container", which would naturally be the case.

What you don't check here is that it matches the device that was passed
to of_dma_controller_register().

> +		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +		unsigned int ch_req = *(unsigned int *)param;
> +		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> +			__func__, mchan, ch_req, mchan->ch_num);
> +		return ch_req == mchan->ch_num;
> +	} else {
> +		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> +			__func__);
> +		return false;
> +	}
> +}
> +
> +static struct of_dma_filter_info moxart_dma_info = {
> +	.filter_fn = moxart_dma_filter_fn,
> +};
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct dma_chan *chan;
> +	struct of_dma_filter_info *info = ofdma->of_dma_data;
> +
> +	if (!info || !info->filter_fn)
> +		return NULL;

This seems pointless too. Why do you pass a of_dma_filter_info pointer
as ofdma->of_dma_data? It's constant after all and you can just access
it a couple of lines higher.

> +	if (dma_spec->args_count != 2)
> +		return NULL;
> +
> +	chan = dma_request_channel(info->dma_cap, info->filter_fn,
> +				   &dma_spec->args[0]);

The filter function is also constant. However, you need to pass the
device pointer here so the filter can compare it.

> +	if (chan)
> +		to_moxart_dma_chan(chan)->line = dma_spec->args[1];
> +
> +	return chan;
> +}

There is still an open question here regarding whether or not the
channel number is actually required to be fixed or not. In most
dma engines, the channels are actually interchangeable, so you only
need to specify the request number, not the channel. Does this still
work if you just pick the first available channel?

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v7] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-05 20:49               ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-05 20:49 UTC (permalink / raw)
  To: linux-arm-kernel

On Monday 05 August 2013, Jonas Jensen wrote:

> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +
> +	if (chan->device->dev == mc->dma_slave.dev) {

This comparison seems rather pointless -- you only check that the
device owning the channel is the same as the device that belongs
to channel's "container", which would naturally be the case.

What you don't check here is that it matches the device that was passed
to of_dma_controller_register().

> +		struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +		unsigned int ch_req = *(unsigned int *)param;
> +		dev_dbg(chan2dev(chan), "%s: mchan=%p ch_req=%d mchan->ch_num=%d\n",
> +			__func__, mchan, ch_req, mchan->ch_num);
> +		return ch_req == mchan->ch_num;
> +	} else {
> +		dev_dbg(chan2dev(chan), "%s: device not registered to this DMA engine\n",
> +			__func__);
> +		return false;
> +	}
> +}
> +
> +static struct of_dma_filter_info moxart_dma_info = {
> +	.filter_fn = moxart_dma_filter_fn,
> +};
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct dma_chan *chan;
> +	struct of_dma_filter_info *info = ofdma->of_dma_data;
> +
> +	if (!info || !info->filter_fn)
> +		return NULL;

This seems pointless too. Why do you pass a of_dma_filter_info pointer
as ofdma->of_dma_data? It's constant after all and you can just access
it a couple of lines higher.

> +	if (dma_spec->args_count != 2)
> +		return NULL;
> +
> +	chan = dma_request_channel(info->dma_cap, info->filter_fn,
> +				   &dma_spec->args[0]);

The filter function is also constant. However, you need to pass the
device pointer here so the filter can compare it.

> +	if (chan)
> +		to_moxart_dma_chan(chan)->line = dma_spec->args[1];
> +
> +	return chan;
> +}

There is still an open question here regarding whether or not the
channel number is actually required to be fixed or not. In most
dma engines, the channels are actually interchangeable, so you only
need to specify the request number, not the channel. Does this still
work if you just pick the first available channel?

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
  2013-08-05 14:37             ` Jonas Jensen
@ 2013-08-06 12:38               ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-06 12:38 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Add test dummy DMA channels to MMC, prove the controller
    has support for interchangeable channel numbers [0].
    
    Add new filter data struct, store dma_spec passed in xlate,
    similar to proposed patch for omap/edma [1][2].
    
    [0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
    [1] https://lkml.org/lkml/2013/8/1/750  "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
    [2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"
    
    Changes since v7:
    
    1. remove unnecessary loop in moxart_alloc_chan_resources()
    2. remove unnecessary status check in moxart_tx_status()
    3. check/handle dma_async_device_register() return value
    4. check/handle devm_request_irq() return value
    5. add and use filter data struct
    6. check if channel device is the same as passed to
       of_dma_controller_register()
    7. add check if chan->device->dev->of_node is the same as
       dma_spec->np (xlate)
    8. support interchangeable channels, #dma-cells is now <1>
    
    device tree bindings document:
    9. update description and example, change "#dma-cells" to "<1>"
    
    Applies to next-20130806

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
 4 files changed, 641 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..69e7001
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..36923cf
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	void __iomem			*base;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+	struct tasklet_struct	tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * dma_request_channel() filter: accept only channels belonging to this
+ * controller and to the device node named by the dma_spec, and record
+ * the requested line number on the channel.  Returns proper bool
+ * literals instead of 0/1.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/*
+ * OF translation: map a one-cell dma_spec (line request number) to a
+ * channel via the filter function.  The unused local 'chan' variable
+ * has been removed.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/* Mark the channel as in use; no per-channel memory is allocated. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+/* Mark the channel as free again; nothing else to release. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+/* Kick off the submitted transfer by setting the channel enable bit. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+/* Cookie-based status only; no hardware residue reporting. */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/* Fill in the dma_device callbacks shared by all channels. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Bottom half scheduled from the interrupt handler; runs client
+ * completion callbacks outside hard-irq context.
+ *
+ * NOTE(review): the callback fires for every allocated channel, not
+ * only for channels whose descriptor actually completed — confirm
+ * clients tolerate spurious callbacks.
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated && ch->tx_desc.callback) {
+			pr_debug("%s: call callback for ch=%p\n",
+				 __func__, ch);
+			ch->tx_desc.callback(ch->tx_desc.callback_param);
+		}
+	}
+}
+
+/*
+ * Interrupt handler: acknowledge finish/error status bits on every
+ * allocated channel, complete cookies, and defer client callbacks to
+ * the tasklet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			/*
+			 * Latch the error for moxart_tx_submit() to clear.
+			 * Previously error_flag was unconditionally zeroed
+			 * right after this branch set it, so errors could
+			 * never be observed.
+			 */
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			writel(ctrl, mchan->base + REG_CTRL);
+		}
+	}
+
+	tasklet_schedule(&mc->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the register bank, initialise the four channels, register
+ * the dma_device and the OF DMA controller, then hook up the shared
+ * interrupt.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/* Was erroneously 'static': would break multiple instances. */
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * irq_of_parse_and_map() returns an unsigned value; 0 is the only
+	 * failure indication ('irq <= 0' was a signedness mismatch).
+	 */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register banks start at offset 0x80, one per channel. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		goto err_unregister_dma;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		/* Undo registrations; previously these leaked on failure. */
+		dev_err(dev, "devm_request_irq failed\n");
+		tasklet_kill(&mdc->tasklet);
+		of_dma_controller_unregister(node);
+		goto err_unregister_dma;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&mdc->dma_slave);
+	return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+	/*
+	 * Previously only the dma_device was unregistered, leaving the
+	 * OF DMA controller registered and the tasklet schedulable.
+	 */
+	of_dma_controller_unregister(pdev->dev.of_node);
+	tasklet_kill(&m->tasklet);
+	dma_async_device_unregister(&m->dma_slave);
+
+	return 0;
+}
+
+/* Device tree match table. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered via subsys_initcall() so the DMA engine is available
+ * before client drivers probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-06 12:38               ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-08-06 12:38 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Add test dummy DMA channels to MMC, prove the controller
    has support for interchangeable channel numbers [0].
    
    Add new filter data struct, store dma_spec passed in xlate,
    similar to proposed patch for omap/edma [1][2].
    
    [0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
    [1] https://lkml.org/lkml/2013/8/1/750  "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
    [2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"
    
    Changes since v7:
    
    1. remove unnecessary loop in moxart_alloc_chan_resources()
    2. remove unnecessary status check in moxart_tx_status()
    3. check/handle dma_async_device_register() return value
    4. check/handle devm_request_irq() return value
    5. add and use filter data struct
    6. check if channel device is the same as passed to
       of_dma_controller_register()
    7. add check if chan->device->dev->of_node is the same as
       dma_spec->np (xlate)
    8. support interchangeable channels, #dma-cells is now <1>
    
    device tree bindings document:
    9. update description and example, change "#dma-cells" to "<1>"
    
    Applies to next-20130806

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
 4 files changed, 641 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..69e7001
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma at 90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500000 0x1000>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957..56c3aaa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,13 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef..470c11b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..36923cf
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,614 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+/*
+ * Per-channel state.  tx_desc is reused for every transfer, i.e. at
+ * most one outstanding descriptor per channel.
+ */
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	int				error_flag;
+	void __iomem			*base;
+	struct completion		dma_complete;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+/* Controller state: four channels sharing one lock and one tasklet. */
+struct moxart_dma_container {
+	int			ctlr;
+	struct dma_device	dma_slave;
+	struct moxart_dma_chan	slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t dma_lock;
+	struct tasklet_struct	tasklet;
+};
+
+/* Passed from moxart_of_xlate() to the channel filter function. */
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Convenience accessor for the struct device behind a dma_chan. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Container-of helper: dma_device -> moxart_dma_container. */
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Container-of helper: dma_chan -> moxart_dma_chan. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * DMA_TERMINATE_ALL: stop the channel by clearing the enable bit and
+ * masking the finish/error interrupts.  The in-flight descriptor is
+ * not completed here.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * DMA_SLAVE_CONFIG: cache the client's config and program the control
+ * register: burst mode, data width, address increment on the memory
+ * side, APB/AHB source/destination select, and the hardware handshake
+ * request line.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/* Data width and increment step on the memory-facing side. */
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		/* 4-byte width (value 0). */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	/* Shifts 16/24 place line_reqno in the DEST/SOURCE REQ_NO fields. */
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/* device_control dispatcher: only terminate and slave-config supported. */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * Submit a prepared descriptor: assign a cookie and enable the
+ * finish/error interrupts on the channel.  The transfer itself is only
+ * started later by moxart_issue_pending().
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	/* Clear any error latched by a previous transfer. */
+	mchan->error_flag = 0;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* Enable completion and error interrupts for this channel. */
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Prepare a single slave transfer.
+ *
+ * NOTE(review): only sgl[0] is programmed; sg_len > 1 is silently
+ * ignored — confirm callers never pass multi-entry scatterlists.
+ * NOTE(review): sg_dma_address() already yields a DMA/bus address, so
+ * the virt_to_phys() translation here looks suspect — verify on this
+ * SoC.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	/* Cycle count = bytes / bus-width units. */
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * dma_request_channel() filter: accept only channels belonging to this
+ * controller and to the device node named by the dma_spec, and record
+ * the requested line number on the channel.  Returns proper bool
+ * literals instead of 0/1.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/*
+ * OF translation: map a one-cell dma_spec (line request number) to a
+ * channel via the filter function.  The unused local 'chan' variable
+ * has been removed.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/* Mark the channel as in use; no per-channel memory is allocated. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+/* Mark the channel as free again; nothing else to release. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+/* Kick off the submitted transfer by setting the channel enable bit. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+/* Cookie-based status only; no hardware residue reporting. */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(chan, cookie, txstate);
+}
+
+/* Fill in the dma_device callbacks shared by all channels. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Bottom half scheduled from the interrupt handler; runs client
+ * completion callbacks outside hard-irq context.
+ *
+ * NOTE(review): the callback fires for every allocated channel, not
+ * only for channels whose descriptor actually completed — confirm
+ * clients tolerate spurious callbacks.
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated && ch->tx_desc.callback) {
+			pr_debug("%s: call callback for ch=%p\n",
+				 __func__, ch);
+			ch->tx_desc.callback(ch->tx_desc.callback_param);
+		}
+	}
+}
+
+/*
+ * Interrupt handler: acknowledge finish/error status bits on every
+ * allocated channel, complete cookies, and defer client callbacks to
+ * the tasklet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			/*
+			 * Latch the error for moxart_tx_submit() to clear.
+			 * Previously error_flag was unconditionally zeroed
+			 * right after this branch set it, so errors could
+			 * never be observed.
+			 */
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error_flag = 1;
+			}
+			writel(ctrl, mchan->base + REG_CTRL);
+		}
+	}
+
+	tasklet_schedule(&mc->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the register bank, initialise the four channels, register
+ * the dma_device and the OF DMA controller, then hook up the shared
+ * interrupt.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/* Was erroneously 'static': would break multiple instances. */
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * irq_of_parse_and_map() returns an unsigned value; 0 is the only
+	 * failure indication ('irq <= 0' was a signedness mismatch).
+	 */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register banks start at offset 0x80, one per channel. */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + 0x80 + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		goto err_unregister_dma;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		/* Undo registrations; previously these leaked on failure. */
+		dev_err(dev, "devm_request_irq failed\n");
+		tasklet_kill(&mdc->tasklet);
+		of_dma_controller_unregister(node);
+		goto err_unregister_dma;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&mdc->dma_slave);
+	return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
+
+	/*
+	 * Previously only the dma_device was unregistered, leaving the
+	 * OF DMA controller registered and the tasklet schedulable.
+	 */
+	of_dma_controller_unregister(pdev->dev.of_node);
+	tasklet_kill(&m->tasklet);
+	dma_async_device_unregister(&m->dma_slave);
+
+	return 0;
+}
+
+/* Device tree match table. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered via subsys_initcall() so the DMA engine is available
+ * before client drivers probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
  2013-08-06 12:38               ` Jonas Jensen
@ 2013-08-06 18:42                 ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-06 18:42 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, linux,
	mark.rutland

On Tuesday 06 August 2013, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>

Looks all good to me now,

Reviewed-by: Arnd Bergmann <arnd@arndb.de>

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-06 18:42                 ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-08-06 18:42 UTC (permalink / raw)
  To: linux-arm-kernel

On Tuesday 06 August 2013, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>

Looks all good to me now,

Reviewed-by: Arnd Bergmann <arnd@arndb.de>

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
  2013-08-06 12:38               ` Jonas Jensen
@ 2013-08-07 15:13                 ` Mark Rutland
  -1 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-07 15:13 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd, linux

On Tue, Aug 06, 2013 at 01:38:31PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Add test dummy DMA channels to MMC, prove the controller
>     has support for interchangeable channel numbers [0].
> 
>     Add new filter data struct, store dma_spec passed in xlate,
>     similar to proposed patch for omap/edma [1][2].
> 
>     [0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
>     [1] https://lkml.org/lkml/2013/8/1/750  "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
>     [2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"
> 
>     Changes since v7:
> 
>     1. remove unnecessary loop in moxart_alloc_chan_resources()
>     2. remove unnecessary status check in moxart_tx_status()
>     3. check/handle dma_async_device_register() return value
>     4. check/handle devm_request_irq() return value
>     5. add and use filter data struct
>     6. check if channel device is the same as passed to
>        of_dma_controller_register()
>     7. add check if chan->device->dev->of_node is the same as
>        dma_spec->np (xlate)
>     8. support interchangeable channels, #dma-cells is now <1>
> 
>     device tree bindings document:
>     9. update description and example, change "#dma-cells" to "<1>"
> 
>     Applies to next-20130806
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
>  4 files changed, 641 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..69e7001
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;
> +       };

The binding looks sensible to me now, but I have a couple of (hopefully
final) questions on the probe failure path.

[...]

> +
> +       ret = dma_async_device_register(&mdc->dma_slave);
> +       if (ret) {
> +               dev_err(dev, "dma_async_device_register failed\n");
> +               return ret;
> +       }
> +
> +       ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +       if (ret) {
> +               dev_err(dev, "of_dma_controller_register failed\n");
> +               dma_async_device_unregister(&mdc->dma_slave);
> +               return ret;
> +       }
> +
> +       platform_set_drvdata(pdev, mdc);
> +
> +       tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> +       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +                              "moxart-dma-engine", mdc);
> +       if (ret) {
> +               dev_err(dev, "devm_request_irq failed\n");

Do you not need calls to of_dma_controller_free and
dma_async_device_unregister here? I'm not all that familiar with the DMA
API, so maybe you don't.

> +               return ret;
> +       }
> +
> +       dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +       return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +       struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);

Similarly, do you not need to call of_dma_controller free here?

> +       dma_async_device_unregister(&m->dma_slave);
> +       return 0;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
@ 2013-08-07 15:13                 ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-08-07 15:13 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Aug 06, 2013 at 01:38:31PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Add test dummy DMA channels to MMC, prove the controller
>     has support for interchangeable channel numbers [0].
> 
>     Add new filter data struct, store dma_spec passed in xlate,
>     similar to proposed patch for omap/edma [1][2].
> 
>     [0] https://bitbucket.org/Kasreyn/linux-next/commits/2f17ac38c5d3af49bc0c559c429a351ddd40063d
>     [1] https://lkml.org/lkml/2013/8/1/750  "[PATCH] DMA: let filter functions of of_dma_simple_xlate possible check of_node"
>     [2] https://lkml.org/lkml/2013/3/11/203 "A proposal to check the device in generic way"
> 
>     Changes since v7:
> 
>     1. remove unnecessary loop in moxart_alloc_chan_resources()
>     2. remove unnecessary status check in moxart_tx_status()
>     3. check/handle dma_async_device_register() return value
>     4. check/handle devm_request_irq() return value
>     5. add and use filter data struct
>     6. check if channel device is the same as passed to
>        of_dma_controller_register()
>     7. add check if chan->device->dev->of_node is the same as
>        dma_spec->np (xlate)
>     8. support interchangeable channels, #dma-cells is now <1>
> 
>     device tree bindings document:
>     9. update description and example, change "#dma-cells" to "<1>"
> 
>     Applies to next-20130806
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 614 +++++++++++++++++++++
>  4 files changed, 641 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..69e7001
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"
> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number
> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500000 0x1000>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;
> +       };

The binding looks sensible to me now, but I have a couple of (hopefully
final) questions on the probe failure path.

[...]

> +
> +       ret = dma_async_device_register(&mdc->dma_slave);
> +       if (ret) {
> +               dev_err(dev, "dma_async_device_register failed\n");
> +               return ret;
> +       }
> +
> +       ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +       if (ret) {
> +               dev_err(dev, "of_dma_controller_register failed\n");
> +               dma_async_device_unregister(&mdc->dma_slave);
> +               return ret;
> +       }
> +
> +       platform_set_drvdata(pdev, mdc);
> +
> +       tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
> +
> +       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +                              "moxart-dma-engine", mdc);
> +       if (ret) {
> +               dev_err(dev, "devm_request_irq failed\n");

Do you not need calls to of_dma_controller_free and
dma_async_device_unregister here? I'm not all that familiar with the DMA
API, so maybe you don't.

> +               return ret;
> +       }
> +
> +       dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +       return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +       struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);

Similarly, do you not need to call of_dma_controller free here?

> +       dma_async_device_unregister(&m->dma_slave);
> +       return 0;
> +}

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v9] dmaengine: Add MOXA ART DMA engine driver
  2013-08-06 12:38               ` Jonas Jensen
@ 2013-10-07 13:13                 ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 13:13 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v8:
    
    1. reorder probe path, of_dma_controller_register() now happens after devm_request_irq()
    2. call of_dma_controller_free() on removal
    3. set flag on error, return DMA_ERROR in device_tx_status()
    4. move tasklet_init() to end of probe path
    5. kill tasklet on removal
    6. remove offset to base address (make it so DT includes offset)
    7. update device tree bindings document example (modify register range to what is actually used)
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 678 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..d418a16
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return 0;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	enum dma_status ret;
+
+	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				continue;
+			}
+
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v9] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-07 13:13                 ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 13:13 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v8:
    
    1. reorder probe path, of_dma_controller_register() now happens after devm_request_irq()
    2. call of_dma_controller_free() on removal
    3. set flag on error, return DMA_ERROR in device_tx_status()
    4. move tasklet_init() to end of probe path
    5. kill tasklet on removal
    6. remove offset to base address (make it so DT includes offset)
    7. update device tree bindings document example (modify register range to what is actually used)
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 678 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..d418a16
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return 0;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	enum dma_status ret;
+
+	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				continue;
+			}
+
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
  2013-08-07 15:13                 ` Mark Rutland
@ 2013-10-07 13:42                   ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 13:42 UTC (permalink / raw)
  To: Mark Rutland
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd, linux

Hi Mark,

Thanks for the replies. Please have a look at v9.

On 7 August 2013 17:13, Mark Rutland <mark.rutland@arm.com> wrote:
>> +
>> +       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
>> +                              "moxart-dma-engine", mdc);
>> +       if (ret) {
>> +               dev_err(dev, "devm_request_irq failed\n");
>
> Do you not need calls to of_dma_controller_free and
> dma_async_device_unregister here? I'm not all that familiar with the DMA
> API, so maybe you don't.

Yes. I see now, I should have moved both dma_async_device_register()
and of_dma_controller_register() to happen after devm_request_irq().
I'll include that in next version.

>> +static int moxart_remove(struct platform_device *pdev)
>> +{
>> +       struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
>
> Similarly, do you not need to call of_dma_controller free here?

Yes, this is now done.


Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v8] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-07 13:42                   ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 13:42 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Mark,

Thanks for the replies. Please have a look at v9.

On 7 August 2013 17:13, Mark Rutland <mark.rutland@arm.com> wrote:
>> +
>> +       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
>> +                              "moxart-dma-engine", mdc);
>> +       if (ret) {
>> +               dev_err(dev, "devm_request_irq failed\n");
>
> Do you not need calls to of_dma_controller_free and
> dma_async_device_unregister here? I'm not all that familiar with the DMA
> API, so maybe you don't.

Yes. I see now, I should have moved both dma_async_device_register()
and of_dma_controller_register() to happen after devm_request_irq().
I'll include that in next version.

>> +static int moxart_remove(struct platform_device *pdev)
>> +{
>> +       struct moxart_dma_container *m = dev_get_drvdata(&pdev->dev);
>
> Similarly, do you not need to call of_dma_controller free here?

Yes, this is now done.


Best regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
  2013-10-07 13:13                 ` Jonas Jensen
@ 2013-10-07 14:10                   ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 14:10 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v9:
    
    1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 678 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return 0;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	enum dma_status ret;
+
+	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				continue;
+			}
+
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-07 14:10                   ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-07 14:10 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v9:
    
    1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 678 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..79facce
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,19 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain the interrupt number
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMA_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return 0;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	enum dma_status ret;
+
+	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				continue;
+			}
+
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
  2013-10-07 14:10                   ` Jonas Jensen
  (?)
@ 2013-10-07 15:12                     ` Mark Rutland
  -1 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-07 15:12 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, devicetree

[adding devicetree]

On Mon, Oct 07, 2013 at 03:10:34PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Changes since v9:
> 
>     1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
> 
>     Applies to next-20130927
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
>  4 files changed, 678 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..79facce
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"

Sorry I didn't notice this previously, but "moxa" isn't in
Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
bindings using it). Could you cook up a separate patch to add an entry
for Moxa, please?

Also, given the SoC is called "ART" it's a shame that we're calling this
"moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
for "moxart" in bindings though, so changing that's likely to lead to
more problems.

> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number

Sorry for yet more pedantry, but could we instead have:

 - interrupts: Should contain an interrupt-specifier for the sole
               interrupt generated by the device.

> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500080 0x40>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;
> +       };

Otherwise I think the binding looks OK.

Thanks,
Mark

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-07 15:12                     ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-07 15:12 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: devicetree, linux, arnd, vinod.koul, linux-kernel, arm, djbw,
	linux-arm-kernel

[adding devicetree]

On Mon, Oct 07, 2013 at 03:10:34PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Changes since v9:
> 
>     1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
> 
>     Applies to next-20130927
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
>  4 files changed, 678 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..79facce
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"

Sorry I didn't notice this previously, but "moxa" isn't in
Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
bindings using it). Could you cook up a separate patch to add an entry
for Moxa, please?

Also, given the SoC is called "ART" it's a shame that we're calling this
"moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
for "moxart" in bindings though, so changing that's likely to lead to
more problems.

> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number

Sorry for yet more pedantry, but could we instead have:

 - interrupts: Should contain an interrupt-specifier for the sole
               interrupt generated by the device.

> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500080 0x40>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;
> +       };

Otherwise I think the binding looks OK.

Thanks,
Mark

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-07 15:12                     ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-07 15:12 UTC (permalink / raw)
  To: linux-arm-kernel

[adding devicetree]

On Mon, Oct 07, 2013 at 03:10:34PM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Changes since v9:
> 
>     1. reorder probe path, also move dma_async_device_register() to happen after devm_request_irq()
> 
>     Applies to next-20130927
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  19 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
>  4 files changed, 678 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..79facce
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,19 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible : Must be "moxa,moxart-dma"

Sorry I didn't notice this previously, but "moxa" isn't in
Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
bindings using it). Could you cook up a separate patch to add an entry
for Moxa, please?

Also, given the SoC is called "ART" it's a shame that we're calling this
"moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
for "moxart" in bindings though, so changing that's likely to lead to
more problems.

> +- reg :                Should contain registers location and length
> +- interrupts : Should contain the interrupt number

Sorry for yet more pedantry, but could we instead have:

 - interrupts: Should contain an interrupt-specifier for the sole
               interrupt generated by the device.

> +- #dma-cells : Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +       dma: dma@90500000 {
> +               compatible = "moxa,moxart-dma";
> +               reg = <0x90500080 0x40>;
> +               interrupts = <24 0>;
> +               #dma-cells = <1>;
> +       };

Otherwise I think the binding looks OK.

Thanks,
Mark

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
  2013-10-07 14:10                   ` Jonas Jensen
@ 2013-10-08  8:42                     ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-08  8:42 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v10:
    
    device tree bindings document:
    1. reformat interrupt description text
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 679 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..697e3f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (busrt=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (busrt=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_dma_container
+*to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&c->dma_lock, flags);
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&c->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+	struct dma_slave_config *config;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+		ret = moxart_slave_config(chan, config);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	dma_cookie_t cookie;
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return 0;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return 1;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = 0;
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
+	enum dma_status ret;
+
+	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				continue;
+			}
+
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08  8:42                     ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-08  8:42 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v10:
    
    device tree bindings document:
    1. reformat interrupt description text
    
    Applies to next-20130927

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   7 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
 4 files changed, 679 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..697e3f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f238cfd..f4ed3a9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,6 +318,13 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035..9ef0916 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..edd6de2
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,651 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+/*
+ * Per-channel state.
+ *
+ * @chan:       generic dmaengine channel
+ * @ch_num:     index of this channel within the controller (0..3)
+ * @allocated:  set in alloc_chan_resources, cleared in free_chan_resources
+ * @error:      latched when the error interrupt status bit fires;
+ *              reported via moxart_tx_status()
+ * @base:       MMIO base of this channel's register window
+ * @cfg:        copy of the client's last dma_slave_config
+ * @tx_desc:    single descriptor, re-initialized on every prep_slave_sg
+ *              (only one transfer in flight per channel)
+ * @line_reqno: hardware request line taken from the DT dma-cells argument
+ */
+struct moxart_dma_chan {
+	struct dma_chan			chan;
+	int				ch_num;
+	bool				allocated;
+	bool				error;
+	void __iomem			*base;
+	struct dma_slave_config		cfg;
+	struct dma_async_tx_descriptor	tx_desc;
+	unsigned int			line_reqno;
+};
+
+/*
+ * Controller state: one dma_device wrapping APB_DMA_MAX_CHANNEL channels.
+ *
+ * @ctlr:        platform device id of this instance
+ * @dma_slave:   dmaengine device registered with the core
+ * @slave_chans: fixed array of per-channel state
+ * @dma_lock:    serializes read/modify/write cycles on channel registers
+ * @tasklet:     bottom half that runs client completion callbacks
+ */
+struct moxart_dma_container {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	spinlock_t			dma_lock;
+	struct tasklet_struct		tasklet;
+};
+
+/* Bundle passed as the param to moxart_dma_filter_fn() via
+ * dma_request_channel() from moxart_of_xlate(). */
+struct moxart_dma_filter_data {
+	struct moxart_dma_container	*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Return the struct device behind a DMA channel (for dev_dbg/dev_err). */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	struct dma_chan_dev *chan_dev = chan->dev;
+
+	return &chan_dev->device;
+}
+
+/* Recover the MOXA ART container from the embedded dma_device. */
+static inline struct moxart_dma_container *
+to_dma_container(struct dma_device *d)
+{
+	return container_of(d, struct moxart_dma_container, dma_slave);
+}
+
+/* Recover the per-channel state from the embedded dma_chan. */
+static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_dma_chan, chan);
+}
+
+/*
+ * Stop a channel: clear the enable bit and mask both the completion
+ * and the error interrupt in the channel control register.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+	ctrl = readl(mchan->base + REG_CTRL) &
+	       ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, mchan->base + REG_CTRL);
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Cache the client's slave configuration and pre-program the channel
+ * control register: burst mode, data width, the address-increment mode
+ * of the memory side, and the hardware handshake request line.
+ *
+ * NOTE(review): the data width is taken from src_addr_width for both
+ * directions — confirm that dst_addr_width should not be used when
+ * direction == DMA_MEM_TO_DEV.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	u32 ctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	/* keep a copy: prep_slave_sg reads addresses and widths from it */
+	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
+
+	ctrl = readl(mchan->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	/* clear increment-mode and request-line fields before re-setting */
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/* the memory-side address increments; the device side stays fixed */
+	switch (mchan->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	default:
+		/* word-wide transfers: the width field is all zeroes */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	}
+
+	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
+		/* AHB (memory) source, APB (device) destination;
+		 * the request line applies to the destination */
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		/* APB (device) source, AHB (memory) destination;
+		 * the request line applies to the source */
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (mchan->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return 0;
+}
+
+/* dmaengine device_control hook: dispatch terminate / slave-config. */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	if (cmd == DMA_TERMINATE_ALL) {
+		moxart_terminate_all(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+
+	/* remaining commands (pause/resume, ...) are not supported */
+	return -ENOSYS;
+}
+
+/*
+ * Assign a cookie to the descriptor and unmask the completion/error
+ * interrupts so the transfer can signal when it finishes.
+ */
+static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	dma_cookie_t cookie;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
+		__func__, mchan, mchan->ch_num, mchan->base);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	/* enable "finished" and "error" interrupts for this channel */
+	ctrl = readl(mchan->base + REG_CTRL) |
+	       APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN;
+	writel(ctrl, mchan->base + REG_CTRL);
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return cookie;
+}
+
+/*
+ * Program a slave transfer into the channel registers.
+ *
+ * NOTE(review): only the first scatterlist entry is programmed; sg_len
+ * greater than 1 is silently ignored — confirm callers pass a single
+ * segment.
+ *
+ * Returns the channel's single descriptor, re-initialized per call.
+ */
+static struct dma_async_tx_descriptor
+*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		      unsigned int sg_len,
+		      enum dma_transfer_direction direction,
+		      unsigned long tx_flags, void *context)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	unsigned int size, adr_width;
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+
+	/*
+	 * sg_dma_address() already yields a bus address established by
+	 * dma_map_sg(); running it through virt_to_phys() would treat a
+	 * DMA address as a kernel virtual address and corrupt it, so it
+	 * is written to the controller directly.
+	 */
+	if (direction == DMA_MEM_TO_DEV) {
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_SOURCE);
+		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.src_addr_width;
+	} else {
+		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
+		writel(sg_dma_address(&sgl[0]),
+		       mchan->base + REG_ADDRESS_DEST);
+
+		adr_width = mchan->cfg.dst_addr_width;
+	}
+
+	size = sgl->length >> adr_width;
+
+	/*
+	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(size, mchan->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
+		__func__, size, sgl->length, adr_width);
+
+	/* single per-channel descriptor, re-armed for this transfer */
+	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
+	mchan->tx_desc.tx_submit = moxart_tx_submit;
+	mchan->error = 0;
+
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+
+	return &mchan->tx_desc;
+}
+
+/*
+ * dma_request_channel() filter: accept only channels that belong to
+ * this controller instance (matching both the device and the OF node)
+ * and record the DT-specified request line number on the channel.
+ *
+ * Returns true when the channel may be used for this request; the
+ * function is bool, so use bool literals rather than 0/1.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_dma_filter_data *fdata = param;
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
+		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
+
+	mchan->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/* #dma-cells translation: args[0] carries the peripheral request line. */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dma_container *mdc = ofdma->of_dma_data;
+	struct moxart_dma_filter_data fdata;
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.mdc = mdc;
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/* Claim the channel; no per-channel resources are actually allocated. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/* Release the channel claim; nothing was allocated, so just clear the flag. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+
+	mchan->allocated = false;
+}
+
+/* Kick the transfer programmed by prep_slave_sg: set the enable bit. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
+
+	spin_lock_irqsave(&mc->dma_lock, flags);
+	ctrl = readl(mchan->base + REG_CTRL) | APB_DMA_ENABLE;
+	writel(ctrl, mchan->base + REG_CTRL);
+	spin_unlock_irqrestore(&mc->dma_lock, flags);
+}
+
+/*
+ * Report transfer status: a latched hardware error (set by the IRQ
+ * handler) overrides the cookie-based bookkeeping.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txs)
+{
+	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
+
+	if (mchan->error)
+		return DMA_ERROR;
+
+	return dma_cookie_status(chan, cookie, txs);
+}
+
+/* Wire up the dmaengine callbacks for this slave-only controller. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	INIT_LIST_HEAD(&dma->channels);
+
+	dma->dev = dev;
+	dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
+	dma->device_free_chan_resources = moxart_free_chan_resources;
+	dma->device_prep_slave_sg = moxart_prep_slave_sg;
+	dma->device_issue_pending = moxart_issue_pending;
+	dma->device_tx_status = moxart_tx_status;
+	dma->device_control = moxart_control;
+}
+
+/*
+ * Bottom half: poll every allocated channel's status and invoke the
+ * client completion callback for channels that finished or failed.
+ * Channels still in progress (or paused) are skipped via "continue".
+ */
+static void moxart_dma_tasklet(unsigned long data)
+{
+	struct moxart_dma_container *mc = (void *)data;
+	struct moxart_dma_chan *ch = &mc->slave_chans[0];
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned int i;
+	enum dma_status s;
+	struct dma_tx_state txs;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (ch->allocated) {
+			tx_desc = &ch->tx_desc;
+
+			s = mc->dma_slave.device_tx_status(&ch->chan,
+							   ch->chan.cookie,
+							   &txs);
+
+			switch (s) {
+			case DMA_ERROR:
+				/* ratelimited: errors can repeat quickly */
+				printk_ratelimited("%s: DMA error\n",
+						   __func__);
+				break;
+			case DMA_SUCCESS:
+				break;
+			case DMA_IN_PROGRESS:
+			case DMA_PAUSED:
+				/* transfer not done yet: no callback */
+				continue;
+			}
+
+			/* NOTE(review): the callback also runs after
+			 * DMA_ERROR — confirm clients expect that */
+			if (tx_desc->callback) {
+				pr_debug("%s: call callback for ch=%p\n",
+					 __func__, ch);
+				tx_desc->callback(tx_desc->callback_param);
+			}
+		}
+	}
+}
+
+/*
+ * Hard IRQ handler shared by all four channels: acknowledge the
+ * finished/error status bits per channel and defer the completion
+ * callbacks to the tasklet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dma_container *mc = devid;
+	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	pr_debug("%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		if (mchan->allocated) {
+			ctrl = readl(mchan->base + REG_CTRL);
+			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
+
+			if (ctrl & APB_DMA_FIN_INT_STS) {
+				/* transfer finished: complete the cookie */
+				ctrl &= ~APB_DMA_FIN_INT_STS;
+				dma_cookie_complete(&mchan->tx_desc);
+			}
+			if (ctrl & APB_DMA_ERR_INT_STS) {
+				/* latch the error for moxart_tx_status() */
+				ctrl &= ~APB_DMA_ERR_INT_STS;
+				mchan->error = 1;
+			}
+			/*
+			 * bits must be cleared here, this function
+			 * called in a loop if moved to tasklet
+			 */
+			writel(ctrl, mchan->base + REG_CTRL);
+
+			tasklet_schedule(&mc->tasklet);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the controller registers, initialize the four channels,
+ * register with the dmaengine core and with the OF DMA helpers.
+ *
+ * Fixes relative to the original:
+ *  - dma_base_addr was declared "static", sharing one pointer across
+ *    all probe calls; it is per-device state and belongs on the stack.
+ *  - irq is unsigned, so the old "irq <= 0" check was misleading;
+ *    irq_of_parse_and_map() returns 0 (never negative) on failure.
+ *  - the tasklet is now initialized BEFORE the IRQ is requested: the
+ *    interrupt handler calls tasklet_schedule(), so an early interrupt
+ *    would otherwise have scheduled an uninitialized tasklet.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_dma_chan *mchan;
+	struct moxart_dma_container *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+	spin_lock_init(&mdc->dma_lock);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* each channel owns a REG_CHAN_SIZE-byte register window */
+	mchan = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
+		mchan->ch_num = i;
+		mchan->base = dma_base_addr + i * REG_CHAN_SIZE;
+		mchan->allocated = 0;
+
+		dma_cookie_init(&mchan->chan);
+		mchan->chan.device = &mdc->dma_slave;
+		list_add_tail(&mchan->chan.device_node,
+			      &mdc->dma_slave.channels);
+
+		dev_dbg(dev, "%s: mchans[%d]: mchan->ch_num=%u mchan->base=%p\n",
+			__func__, i, mchan->ch_num, mchan->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	/* must precede devm_request_irq(): the handler schedules it */
+	tasklet_init(&mdc->tasklet, moxart_dma_tasklet, (unsigned long)mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		goto err_tasklet;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		goto err_tasklet;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		goto err_tasklet;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+
+err_tasklet:
+	tasklet_kill(&mdc->tasklet);
+	return ret;
+}
+
+/*
+ * Tear down the controller: stop the bottom half, unregister from the
+ * dmaengine core, and drop the OF DMA controller registration.
+ *
+ * NOTE(review): the devm-managed IRQ is only released after this
+ * returns, so a late interrupt could re-schedule the tasklet after
+ * tasklet_kill() — confirm whether the IRQ should be freed explicitly
+ * before killing the tasklet.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dma_container *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ /* sentinel */ }
+};
+/* export the match table so the module can be autoloaded from DT */
+MODULE_DEVICE_TABLE(of, moxart_dma_match);
+
+/* Bound through the OF match table; "moxart-dma-engine" also names the IRQ. */
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time rather than module_init;
+ * NOTE(review): presumably so the DMA engine is available before
+ * client drivers (e.g. the MMC driver mentioned in the changelog)
+ * probe — confirm.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08  9:53                       ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-08  9:53 UTC (permalink / raw)
  To: Mark Rutland
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, devicetree

On 7 October 2013 17:12, Mark Rutland <mark.rutland@arm.com> wrote:
> Sorry I didn't notice this previously, but "moxa" isn't in
> Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> bindings using it). Could you cook up a separate patch to add an entry
> for Moxa, please?

Yes, I'll submit a separate patch.

> Also, given the SoC is called "ART" it's a shame that we're calling this
> "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> for "moxart" in bindings though, so changing that's likely to lead to
> more problems.

Sorry about that, I think the "moxart" contraction was suggested and
has been sticky ever since.

It's at least a little appropriate because the physical chip text
reads "MOXA ART" (photo):

https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg

Currently three drivers in linux-next use the name with accompanying
device tree bindings.
Considering the amount of patches required, can we keep the name, please?

> Sorry for yet more pendantry, but could we instead have:
>
>  - interrupts: Should contain an interrupt-specifier for the sole
>                interrupt generated by the device.

Fixed in v11.


Regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08  9:53                       ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-08  9:53 UTC (permalink / raw)
  To: Mark Rutland
  Cc: linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA, arm-DgEjT+Ai2ygdnm+yROfE0A,
	vinod.koul-ral2JQCrhuEAvxtiuMwx3w, djbw-b10kYP2dOMg,
	arnd-r2nGTMty4D4, linux-lFZ/pmaqli7XmaaqVzeoHQ,
	devicetree-u79uwXL29TY76Z2rM5mHXA

On 7 October 2013 17:12, Mark Rutland <mark.rutland-5wv7dgnIgG8@public.gmane.org> wrote:
> Sorry I didn't notice this previously, but "moxa" isn't in
> Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> bindings using it). Could you cook up a separate patch to add an entry
> for Moxa, please?

Yes, I'll submit a separate patch.

> Also, given the SoC is called "ART" it's a shame that we're calling this
> "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> for "moxart" in bindings though, so changing that's likely to lead to
> more problems.

Sorry about that, I think the "moxart" contraction was suggested and
has been sticky ever since.

It's at least a little appropriate because the physical chip text
reads "MOXA ART" (photo):

https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg

Currently three drivers in linux-next use the name with accompanying
device tree bindings.
Considering the amount of patches required, can we keep the name, please?

> Sorry for yet more pendantry, but could we instead have:
>
>  - interrupts: Should contain an interrupt-specifier for the sole
>                interrupt generated by the device.

Fixed in v11.


Regards,
Jonas
--
To unsubscribe from this list: send the line "unsubscribe devicetree" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08  9:53                       ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-10-08  9:53 UTC (permalink / raw)
  To: linux-arm-kernel

On 7 October 2013 17:12, Mark Rutland <mark.rutland@arm.com> wrote:
> Sorry I didn't notice this previously, but "moxa" isn't in
> Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> bindings using it). Could you cook up a separate patch to add an entry
> for Moxa, please?

Yes, I'll submit a separate patch.

> Also, given the SoC is called "ART" it's a shame that we're calling this
> "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> for "moxart" in bindings though, so changing that's likely to lead to
> more problems.

Sorry about that, I think the "moxart" contraction was suggested and
has been sticky ever since.

It's at least a little appropriate because the physical chip text
reads "MOXA ART" (photo):

https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg

Currently three drivers in linux-next use the name with accompanying
device tree bindings.
Considering the amount of patches required, can we keep the name, please?

> Sorry for yet more pendantry, but could we instead have:
>
>  - interrupts: Should contain an interrupt-specifier for the sole
>                interrupt generated by the device.

Fixed in v11.


Regards,
Jonas

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
  2013-10-08  9:53                       ` Jonas Jensen
  (?)
@ 2013-10-08 12:55                         ` Mark Rutland
  -1 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-08 12:55 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, devicetree

On Tue, Oct 08, 2013 at 10:53:36AM +0100, Jonas Jensen wrote:
> On 7 October 2013 17:12, Mark Rutland <mark.rutland@arm.com> wrote:
> > Sorry I didn't notice this previously, but "moxa" isn't in
> > Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> > bindings using it). Could you cook up a separate patch to add an entry
> > for Moxa, please?
> 
> Yes, I'll submit a separate patch.

Cheers.

> 
> > Also, given the SoC is called "ART" it's a shame that we're calling this
> > "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> > for "moxart" in bindings though, so changing that's likely to lead to
> > more problems.
> 
> Sorry about that, I think the "moxart" contraction was suggested and
> has been sticky ever since.
> 
> It's at least a little appropriate because the physical chip text
> reads "MOXA ART" (photo):
> 
> https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg
> 
> Currently three drivers in linux-next use the name with accompanying
> device tree bindings.
> Considering the amount of patches required, can we keep the name, please?

Yeah, I think we have to keep it. It's not objectively wrong, and we
have other contractions (e.g. vexpress) in bindings. It just looks a bit
more odd than the others due to the repetition of "moxa". There's no
benefit to be had changing it now.

> 
> > Sorry for yet more pendantry, but could we instead have:
> >
> >  - interrupts: Should contain an interrupt-specifier for the sole
> >                interrupt generated by the device.
> 
> Fixed in v11.

Sounds good.

Cheers,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08 12:55                         ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-08 12:55 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: devicetree, linux, arnd, vinod.koul, linux-kernel, arm, djbw,
	linux-arm-kernel

On Tue, Oct 08, 2013 at 10:53:36AM +0100, Jonas Jensen wrote:
> On 7 October 2013 17:12, Mark Rutland <mark.rutland@arm.com> wrote:
> > Sorry I didn't notice this previously, but "moxa" isn't in
> > Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> > bindings using it). Could you cook up a separate patch to add an entry
> > for Moxa, please?
> 
> Yes, I'll submit a separate patch.

Cheers.

> 
> > Also, given the SoC is called "ART" it's a shame that we're calling this
> > "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> > for "moxart" in bindings though, so changing that's likely to lead to
> > more problems.
> 
> Sorry about that, I think the "moxart" contraction was suggested and
> has been sticky ever since.
> 
> It's at least a little appropriate because the physical chip text
> reads "MOXA ART" (photo):
> 
> https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg
> 
> Currently three drivers in linux-next use the name with accompanying
> device tree bindings.
> Considering the amount of patches required, can we keep the name, please?

Yeah, I think we have to keep it. It's not objectively wrong, and we
have other contractions (e.g. vexpress) in bindings. It just looks a bit
more odd than the others due to the repetition of "moxa". There's no
benefit to be had changing it now.

> 
> > Sorry for yet more pendantry, but could we instead have:
> >
> >  - interrupts: Should contain an interrupt-specifier for the sole
> >                interrupt generated by the device.
> 
> Fixed in v11.

Sounds good.

Cheers,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v10] dmaengine: Add MOXA ART DMA engine driver
@ 2013-10-08 12:55                         ` Mark Rutland
  0 siblings, 0 replies; 80+ messages in thread
From: Mark Rutland @ 2013-10-08 12:55 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Oct 08, 2013 at 10:53:36AM +0100, Jonas Jensen wrote:
> On 7 October 2013 17:12, Mark Rutland <mark.rutland@arm.com> wrote:
> > Sorry I didn't notice this previously, but "moxa" isn't in
> > Documentation/devicetree/bindings/vendor-prefixes.txt (despite several
> > bindings using it). Could you cook up a separate patch to add an entry
> > for Moxa, please?
> 
> Yes, I'll submit a separate patch.

Cheers.

> 
> > Also, given the SoC is called "ART" it's a shame that we're calling this
> > "moxa,moxart-dma" rather than "moxa,art-dma". We already have precedent
> > for "moxart" in bindings though, so changing that's likely to lead to
> > more problems.
> 
> Sorry about that, I think the "moxart" contraction was suggested and
> has been sticky ever since.
> 
> It's at least a little appropriate because the physical chip text
> reads "MOXA ART" (photo):
> 
> https://lh4.googleusercontent.com/-A-2FXDrObU8/UMcMc_K2vEI/AAAAAAAABwg/ldaLZ7ps1P4/w1331-h998-no/UC-7112-LX-picture4.jpg
> 
> Currently three drivers in linux-next use the name with accompanying
> device tree bindings.
> Considering the amount of patches required, can we keep the name, please?

Yeah, I think we have to keep it. It's not objectively wrong, and we
have other contractions (e.g. vexpress) in bindings. It just looks a bit
more odd than the others due to the repetition of "moxa". There's no
benefit to be had changing it now.

> 
> > Sorry for yet more pendantry, but could we instead have:
> >
> >  - interrupts: Should contain an interrupt-specifier for the sole
> >                interrupt generated by the device.
> 
> Fixed in v11.

Sounds good.

Cheers,
Mark.

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
  2013-10-08  8:42                     ` Jonas Jensen
@ 2013-11-13 13:59                       ` Vinod Koul
  -1 siblings, 0 replies; 80+ messages in thread
From: Vinod Koul @ 2013-11-13 13:59 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: linux-arm-kernel, linux-kernel, arm, djbw, arnd, linux, mark.rutland

On Tue, Oct 08, 2013 at 10:42:36AM +0200, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
I see this is pending and I first need the ACK on the DT parts of the patch before
we can apply this.

Also pls cc dmaengine@vger.kernel.org on this patch

> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Changes since v10:
>     
>     device tree bindings document:
>     1. reformat interrupt description text
>     
>     Applies to next-20130927
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
>  4 files changed, 679 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..697e3f6
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,20 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible :	Must be "moxa,moxart-dma"
> +- reg :		Should contain registers location and length
> +- interrupts :	Should contain an interrupt-specifier for the sole
> +		interrupt generated by the device
> +- #dma-cells :	Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +	dma: dma@90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500080 0x40>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index f238cfd..f4ed3a9 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -318,6 +318,13 @@ config K3_DMA
>  	  Support the DMA engine for Hisilicon K3 platform
>  	  devices.
>  
> +config MOXART_DMA
> +	tristate "MOXART DMA support"
> +	depends on ARCH_MOXART
> +	select DMA_ENGINE
> +	help
> +	  Enable support for the MOXA ART SoC DMA controller.
> +
>  config DMA_ENGINE
>  	bool
>  
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index db89035..9ef0916 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
>  obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
>  obj-$(CONFIG_TI_CPPI41) += cppi41.o
>  obj-$(CONFIG_K3_DMA) += k3dma.o
> +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
> diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
> new file mode 100644
> index 0000000..edd6de2
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,651 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/irq.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_ADDRESS_SOURCE			0
> +#define REG_ADDRESS_DEST			4
> +#define REG_CYCLES				8
> +#define REG_CTRL				12
> +#define REG_CHAN_SIZE				16
> +
> +#define APB_DMA_ENABLE				0x1
> +#define APB_DMA_FIN_INT_STS			0x2
> +#define APB_DMA_FIN_INT_EN			0x4
> +#define APB_DMA_BURST_MODE			0x8
> +#define APB_DMA_ERR_INT_STS			0x10
> +#define APB_DMA_ERR_INT_EN			0x20
> +
> +/*
> + * unset to select APB source
> + * set to select AHB source
> + */
> +#define APB_DMA_SOURCE_SELECT			0x40
> +
> +/*
> + * unset to select APB destination
> + * set to select AHB destination
> + */
> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_SOURCE_MASK			0x700
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +
> +#define APB_DMA_DEST				0x1000
> +#define APB_DMA_DEST_MASK			0x7000
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> +*/
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * request signal select of destination
> + * address for DMA hardware handshake
> + *
> + * the request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id == 5
> + *
> + * 0:    no request / grant signal
> + * 1-15: request / grant signal
> + */
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * data width of transfer
> + * 00: word
> + * 01: half
> + * 10: byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +/*
> + * request signal select of source
> + * address for DMA hardware handshake
> + *
> + * the request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id == 5
> + *
> + * 0:    no request / grant signal
> + * 1-15: request / grant signal
> + */
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +struct moxart_dma_chan {
> +	struct dma_chan			chan;
> +	int				ch_num;
> +	bool				allocated;
> +	bool				error;
> +	void __iomem			*base;
> +	struct dma_slave_config		cfg;
> +	struct dma_async_tx_descriptor	tx_desc;
> +	unsigned int			line_reqno;
> +};
> +
> +struct moxart_dma_container {
> +	int				ctlr;
> +	struct dma_device		dma_slave;
> +	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +	spinlock_t			dma_lock;
> +	struct tasklet_struct		tasklet;
> +};
> +
> +struct moxart_dma_filter_data {
> +	struct moxart_dma_container	*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_dma_container
> +*to_dma_container(struct dma_device *d)
> +{
> +	return container_of(d, struct moxart_dma_container, dma_slave);
> +}
> +
> +static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_dma_chan, chan);
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&c->dma_lock, flags);
> +
> +	ctrl = readl(ch->base + REG_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&c->dma_lock, flags);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (mchan->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	default:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	}
> +
> +	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (mchan->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (mchan->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, mchan->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +	struct dma_slave_config *config;
> +
> +	switch (cmd) {
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		config = (struct dma_slave_config *)arg;
> +		ret = moxart_slave_config(chan, config);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	dma_cookie_t cookie;
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
> +		__func__, mchan, mchan->ch_num, mchan->base);
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	cookie = dma_cookie_assign(tx);
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, mchan->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return cookie;
> +}
> +
> +static struct dma_async_tx_descriptor
> +*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> +		      unsigned int sg_len,
> +		      enum dma_transfer_direction direction,
> +		      unsigned long tx_flags, void *context)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	unsigned long flags;
> +	unsigned int size, adr_width;
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	if (direction == DMA_MEM_TO_DEV) {
> +		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
> +		       mchan->base + REG_ADDRESS_SOURCE);
> +		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
> +
> +		adr_width = mchan->cfg.src_addr_width;
> +	} else {
> +		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
> +		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
> +		       mchan->base + REG_ADDRESS_DEST);
> +
> +		adr_width = mchan->cfg.dst_addr_width;
this is odd. You are not supposed to write to hardware here. You should store
all info, prepare the descriptor and then write to hw in issue_pending.
> +	}
> +
> +	size = sgl->length >> adr_width;
> +
> +	/*
> +	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
> +	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
> +	 */
> +	writel(size, mchan->base + REG_CYCLES);
> +
> +	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
> +		__func__, size, sgl->length, adr_width);
> +
> +	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
> +	mchan->tx_desc.tx_submit = moxart_tx_submit;
> +	mchan->error = 0;
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return &mchan->tx_desc;
you don't seem to store the descriptor anywhere?
> +}
> +
> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_dma_filter_data *fdata = param;
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +	    chan->device->dev->of_node != fdata->dma_spec->np) {
> +		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +		return 0;
> +	}
> +
> +	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
> +		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
> +
> +	mchan->line_reqno = fdata->dma_spec->args[0];
> +
> +	return 1;
1..? true/false makes more sense.

> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dma_container *mdc = ofdma->of_dma_data;
> +	struct moxart_dma_filter_data fdata = {
> +		.mdc = mdc,
> +	};
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	fdata.dma_spec = dma_spec;
> +
> +	return dma_request_channel(mdc->dma_slave.cap_mask,
> +				   moxart_dma_filter_fn, &fdata);
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, mchan->ch_num);
> +	mchan->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, mchan->ch_num);
> +	mchan->allocated = 0;
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= APB_DMA_ENABLE;
> +	writel(ctrl, mchan->base + REG_CTRL);
what about channel configuration. Also what about the case when the dma channel
is already executing, you need to wait for that!

> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +}
> +
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txs)
> +{
> +	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
> +	enum dma_status ret;
> +
> +	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
You are not filling the residue for the in flight descriptors.

> +
> +	return ret;
> +}
> +
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
> +	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources		= moxart_free_chan_resources;
> +	dma->device_issue_pending		= moxart_issue_pending;
> +	dma->device_tx_status			= moxart_tx_status;
> +	dma->device_control			= moxart_control;
> +	dma->dev				= dev;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +static void moxart_dma_tasklet(unsigned long data)
> +{
> +	struct moxart_dma_container *mc = (void *)data;
> +	struct moxart_dma_chan *ch = &mc->slave_chans[0];
> +	struct dma_async_tx_descriptor *tx_desc;
> +	unsigned int i;
> +	enum dma_status s;
> +	struct dma_tx_state txs;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (ch->allocated) {
> +			tx_desc = &ch->tx_desc;
> +
> +			s = mc->dma_slave.device_tx_status(&ch->chan,
> +							   ch->chan.cookie,
> +							   &txs);
> +
> +			switch (s) {
> +			case DMA_ERROR:
> +				printk_ratelimited("%s: DMA error\n",
> +						   __func__);
no log level here?
> +				break;
> +			case DMA_SUCCESS:
> +				break;
> +			case DMA_IN_PROGRESS:
> +			case DMA_PAUSED:
> +				continue;
> +			}
Its odd actually, the channel status of PAUSE makes sense but I am not sure what
you mean by DMA_SUCCESS or DMA_IN_PROGRESS. These make sense for the
descriptors.
> +
> +			if (tx_desc->callback) {
> +				pr_debug("%s: call callback for ch=%p\n",
> +					 __func__, ch);
> +				tx_desc->callback(tx_desc->callback_param);
> +			}
> +		}
> +	}
> +}
> +
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dma_container *mc = devid;
> +	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
> +	unsigned int i;
> +	u32 ctrl;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		if (mchan->allocated) {
> +			ctrl = readl(mchan->base + REG_CTRL);
> +			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
> +
> +			if (ctrl & APB_DMA_FIN_INT_STS) {
> +				ctrl &= ~APB_DMA_FIN_INT_STS;
> +				dma_cookie_complete(&mchan->tx_desc);
> +			}
> +			if (ctrl & APB_DMA_ERR_INT_STS) {
> +				ctrl &= ~APB_DMA_ERR_INT_STS;
> +				mchan->error = 1;
> +			}
> +			/*
> +			 * bits must be cleared here, this function
> +			 * called in a loop if moved to tasklet
> +			 */
> +			writel(ctrl, mchan->base + REG_CTRL);
> +
> +			tasklet_schedule(&mc->tasklet);
> +		}
> +	}
> +
> +	return IRQ_HANDLED;
> +}
I think you have implemented that there will be _only_ one descriptor active and
submitted at any point of time. IMO this shouldn't be done; you can easily
implement a better way by managing multiple transactions in the driver.

Also see the virt-dma layer, using that will help you managing the descriptors
and lists for managing the descriptors

--
~Vinod

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
@ 2013-11-13 13:59                       ` Vinod Koul
  0 siblings, 0 replies; 80+ messages in thread
From: Vinod Koul @ 2013-11-13 13:59 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Oct 08, 2013 at 10:42:36AM +0200, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
I see this is pending and I first need the ACK on the DT parts of the patch before
we can apply this.

Also pls cc dmaengine at vger.kernel.org on this patch

> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
> 
> Notes:
>     Changes since v10:
>     
>     device tree bindings document:
>     1. reformat interrupt description text
>     
>     Applies to next-20130927
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
>  drivers/dma/Kconfig                                |   7 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 651 +++++++++++++++++++++
>  4 files changed, 679 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..697e3f6
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,20 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible :	Must be "moxa,moxart-dma"
> +- reg :		Should contain registers location and length
> +- interrupts :	Should contain an interrupt-specifier for the sole
> +		interrupt generated by the device
> +- #dma-cells :	Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +	dma: dma at 90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500080 0x40>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index f238cfd..f4ed3a9 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -318,6 +318,13 @@ config K3_DMA
>  	  Support the DMA engine for Hisilicon K3 platform
>  	  devices.
>  
> +config MOXART_DMA
> +	tristate "MOXART DMA support"
> +	depends on ARCH_MOXART
> +	select DMA_ENGINE
> +	help
> +	  Enable support for the MOXA ART SoC DMA controller.
> +
>  config DMA_ENGINE
>  	bool
>  
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index db89035..9ef0916 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -41,3 +41,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
>  obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
>  obj-$(CONFIG_TI_CPPI41) += cppi41.o
>  obj-$(CONFIG_K3_DMA) += k3dma.o
> +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
> diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
> new file mode 100644
> index 0000000..edd6de2
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,651 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/irq.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_ADDRESS_SOURCE			0
> +#define REG_ADDRESS_DEST			4
> +#define REG_CYCLES				8
> +#define REG_CTRL				12
> +#define REG_CHAN_SIZE				16
> +
> +#define APB_DMA_ENABLE				0x1
> +#define APB_DMA_FIN_INT_STS			0x2
> +#define APB_DMA_FIN_INT_EN			0x4
> +#define APB_DMA_BURST_MODE			0x8
> +#define APB_DMA_ERR_INT_STS			0x10
> +#define APB_DMA_ERR_INT_EN			0x20
> +
> +/*
> + * unset to select APB source
> + * set to select AHB source
> + */
> +#define APB_DMA_SOURCE_SELECT			0x40
> +
> +/*
> + * unset to select APB destination
> + * set to select AHB destination
> + */
> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_SOURCE_MASK			0x700
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +
> +#define APB_DMA_DEST				0x1000
> +#define APB_DMA_DEST_MASK			0x7000
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> +*/
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * request signal select of destination
> + * address for DMA hardware handshake
> + *
> + * the request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id == 5
> + *
> + * 0:    no request / grant signal
> + * 1-15: request / grant signal
> + */
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * data width of transfer
> + * 00: word
> + * 01: half
> + * 10: byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +/*
> + * request signal select of source
> + * address for DMA hardware handshake
> + *
> + * the request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id == 5
> + *
> + * 0:    no request / grant signal
> + * 1-15: request / grant signal
> + */
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +struct moxart_dma_chan {
> +	struct dma_chan			chan;
> +	int				ch_num;
> +	bool				allocated;
> +	bool				error;
> +	void __iomem			*base;
> +	struct dma_slave_config		cfg;
> +	struct dma_async_tx_descriptor	tx_desc;
> +	unsigned int			line_reqno;
> +};
> +
> +struct moxart_dma_container {
> +	int				ctlr;
> +	struct dma_device		dma_slave;
> +	struct moxart_dma_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +	spinlock_t			dma_lock;
> +	struct tasklet_struct		tasklet;
> +};
> +
> +struct moxart_dma_filter_data {
> +	struct moxart_dma_container	*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_dma_container
> +*to_dma_container(struct dma_device *d)
> +{
> +	return container_of(d, struct moxart_dma_container, dma_slave);
> +}
> +
> +static inline struct moxart_dma_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_dma_chan, chan);
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *c = to_dma_container(ch->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&c->dma_lock, flags);
> +
> +	ctrl = readl(ch->base + REG_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&c->dma_lock, flags);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	memcpy(&mchan->cfg, cfg, sizeof(mchan->cfg));
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (mchan->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	default:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (mchan->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	}
> +
> +	if (mchan->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (mchan->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (mchan->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, mchan->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +	struct dma_slave_config *config;
> +
> +	switch (cmd) {
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		config = (struct dma_slave_config *)arg;
> +		ret = moxart_slave_config(chan, config);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static dma_cookie_t moxart_tx_submit(struct dma_async_tx_descriptor *tx)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(tx->chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	dma_cookie_t cookie;
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(tx->chan), "%s: mchan=%p mchan->ch_num=%u mchan->base=%p\n",
> +		__func__, mchan, mchan->ch_num, mchan->base);
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	cookie = dma_cookie_assign(tx);
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, mchan->base + REG_CTRL);
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return cookie;
> +}
> +
> +static struct dma_async_tx_descriptor
> +*moxart_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> +		      unsigned int sg_len,
> +		      enum dma_transfer_direction direction,
> +		      unsigned long tx_flags, void *context)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	unsigned long flags;
> +	unsigned int size, adr_width;
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	if (direction == DMA_MEM_TO_DEV) {
> +		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
> +		       mchan->base + REG_ADDRESS_SOURCE);
> +		writel(mchan->cfg.dst_addr, mchan->base + REG_ADDRESS_DEST);
> +
> +		adr_width = mchan->cfg.src_addr_width;
> +	} else {
> +		writel(mchan->cfg.src_addr, mchan->base + REG_ADDRESS_SOURCE);
> +		writel(virt_to_phys((void *)sg_dma_address(&sgl[0])),
> +		       mchan->base + REG_ADDRESS_DEST);
> +
> +		adr_width = mchan->cfg.dst_addr_width;
this is odd. You are not supposed to write to hardware here. You should store
all info, prepare the descriptor and then write to hw in issue_pending.
> +	}
> +
> +	size = sgl->length >> adr_width;
> +
> +	/*
> +	 * size is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
> +	 * ( when data_width == APB_DMAB_DATA_WIDTH_4 )
> +	 */
> +	writel(size, mchan->base + REG_CYCLES);
> +
> +	dev_dbg(chan2dev(chan), "%s: set %u DMA cycles (sgl->length=%u adr_width=%u)\n",
> +		__func__, size, sgl->length, adr_width);
> +
> +	dma_async_tx_descriptor_init(&mchan->tx_desc, chan);
> +	mchan->tx_desc.tx_submit = moxart_tx_submit;
> +	mchan->error = 0;
> +
> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +
> +	return &mchan->tx_desc;
you dont see to store the descriptor anywhere?
> +}
> +
> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_dma_filter_data *fdata = param;
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +	    chan->device->dev->of_node != fdata->dma_spec->np) {
> +		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +		return 0;
> +	}
> +
> +	dev_dbg(chan2dev(chan), "%s: mchan=%p line_reqno=%u mchan->ch_num=%u\n",
> +		__func__, mchan, fdata->dma_spec->args[0], mchan->ch_num);
> +
> +	mchan->line_reqno = fdata->dma_spec->args[0];
> +
> +	return 1;
1..? true/false makes more sense.

> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dma_container *mdc = ofdma->of_dma_data;
> +	struct moxart_dma_filter_data fdata = {
> +		.mdc = mdc,
> +	};
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	fdata.dma_spec = dma_spec;
> +
> +	return dma_request_channel(mdc->dma_slave.cap_mask,
> +				   moxart_dma_filter_fn, &fdata);
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, mchan->ch_num);
> +	mchan->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, mchan->ch_num);
> +	mchan->allocated = 0;
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_dma_chan *mchan = to_moxart_dma_chan(chan);
> +	struct moxart_dma_container *mc = to_dma_container(mchan->chan.device);
> +	u32 ctrl;
> +	unsigned long flags;
> +
> +	dev_dbg(chan2dev(chan), "%s: mchan=%p\n", __func__, mchan);
> +
> +	spin_lock_irqsave(&mc->dma_lock, flags);
> +
> +	ctrl = readl(mchan->base + REG_CTRL);
> +	ctrl |= APB_DMA_ENABLE;
> +	writel(ctrl, mchan->base + REG_CTRL);
what about channel configuration. Also what about the case when the dma channel
is already executing, you need to wait for that!

> +	spin_unlock_irqrestore(&mc->dma_lock, flags);
> +}
> +
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txs)
> +{
> +	struct moxart_dma_chan *ch = to_moxart_dma_chan(chan);
> +	enum dma_status ret;
> +
> +	ret = (ch->error) ? DMA_ERROR : dma_cookie_status(chan, cookie, txs);
You are not filling the residue for the in flight descriptors.

> +
> +	return ret;
> +}
> +
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
> +	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources		= moxart_free_chan_resources;
> +	dma->device_issue_pending		= moxart_issue_pending;
> +	dma->device_tx_status			= moxart_tx_status;
> +	dma->device_control			= moxart_control;
> +	dma->dev				= dev;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +static void moxart_dma_tasklet(unsigned long data)
> +{
> +	struct moxart_dma_container *mc = (void *)data;
> +	struct moxart_dma_chan *ch = &mc->slave_chans[0];
> +	struct dma_async_tx_descriptor *tx_desc;
> +	unsigned int i;
> +	enum dma_status s;
> +	struct dma_tx_state txs;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (ch->allocated) {
> +			tx_desc = &ch->tx_desc;
> +
> +			s = mc->dma_slave.device_tx_status(&ch->chan,
> +							   ch->chan.cookie,
> +							   &txs);
> +
> +			switch (s) {
> +			case DMA_ERROR:
> +				printk_ratelimited("%s: DMA error\n",
> +						   __func__);
no log level here?
> +				break;
> +			case DMA_SUCCESS:
> +				break;
> +			case DMA_IN_PROGRESS:
> +			case DMA_PAUSED:
> +				continue;
> +			}
Its odd actually, the channel status of PAUSE makes sense but I am not sure what
you mean by DMA_SUCCESS or DMA_IN_PROGRESS. These make sense for the
descriptors.
> +
> +			if (tx_desc->callback) {
> +				pr_debug("%s: call callback for ch=%p\n",
> +					 __func__, ch);
> +				tx_desc->callback(tx_desc->callback_param);
> +			}
> +		}
> +	}
> +}
> +
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dma_container *mc = devid;
> +	struct moxart_dma_chan *mchan = &mc->slave_chans[0];
> +	unsigned int i;
> +	u32 ctrl;
> +
> +	pr_debug("%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, mchan++) {
> +		if (mchan->allocated) {
> +			ctrl = readl(mchan->base + REG_CTRL);
> +			pr_debug("%s: ctrl=%x\n", __func__, ctrl);
> +
> +			if (ctrl & APB_DMA_FIN_INT_STS) {
> +				ctrl &= ~APB_DMA_FIN_INT_STS;
> +				dma_cookie_complete(&mchan->tx_desc);
> +			}
> +			if (ctrl & APB_DMA_ERR_INT_STS) {
> +				ctrl &= ~APB_DMA_ERR_INT_STS;
> +				mchan->error = 1;
> +			}
> +			/*
> +			 * bits must be cleared here, this function
> +			 * called in a loop if moved to tasklet
> +			 */
> +			writel(ctrl, mchan->base + REG_CTRL);
> +
> +			tasklet_schedule(&mc->tasklet);
> +		}
> +	}
> +
> +	return IRQ_HANDLED;
> +}
I think you have implemented this so that there will be _only_ one descriptor active and
submitted at any point of time. IMO this shouldn't be done; you can easily
implement a better way by managing multiple transactions in the driver.

Also see the virt-dma layer, using that will help you managing the descriptors
and lists for managing the descriptors

--
~Vinod

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
  2013-11-13 13:59                       ` Vinod Koul
@ 2013-11-13 17:16                         ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-11-13 17:16 UTC (permalink / raw)
  To: Vinod Koul
  Cc: Jonas Jensen, linux-arm-kernel, linux-kernel, arm, djbw, linux,
	mark.rutland

On Wednesday 13 November 2013, Vinod Koul wrote:
> On Tue, Oct 08, 2013 at 10:42:36AM +0200, Jonas Jensen wrote:
> > The MOXA ART SoC has a DMA controller capable of offloading expensive
> > memory operations, such as large copies. This patch adds support for
> > the controller including four channels. Two of these are used to
> > handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> > used in a future audio driver or client application.
> I see this is pending and I first need the ACK on DT parts of the patch before
> we can apply this.
> 
> Also pls cc dmaengine@vger.kernel.org on this patch

The DT binding looks good to me

Acked-by: Arnd Bergmann <arnd@arndb.de>

However, in the future, such binding should also be sent to the
devicetree@vger.kernel.org list for review, in a separate patch,
as clarified during the kernel summit.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v11] dmaengine: Add MOXA ART DMA engine driver
@ 2013-11-13 17:16                         ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-11-13 17:16 UTC (permalink / raw)
  To: linux-arm-kernel

On Wednesday 13 November 2013, Vinod Koul wrote:
> On Tue, Oct 08, 2013 at 10:42:36AM +0200, Jonas Jensen wrote:
> > The MOXA ART SoC has a DMA controller capable of offloading expensive
> > memory operations, such as large copies. This patch adds support for
> > the controller including four channels. Two of these are used to
> > handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> > used in a future audio driver or client application.
> I see this is pending and I first need the ACK on DT parts of the patch before
> we can apply this.
> 
> Also pls cc dmaengine at vger.kernel.org on this patch

The DT binding looks good to me

Acked-by: Arnd Bergmann <arnd@arndb.de>

However, in the future, such binding should also be sent to the
devicetree at vger.kernel.org list for review, in a separate patch,
as clarified during the kernel summit.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v12] dmaengine: Add MOXA ART DMA engine driver
  2013-10-08  8:42                     ` Jonas Jensen
@ 2013-12-06 14:27                       ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-06 14:27 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-kernel, arm, vinod.koul, djbw, arnd, linux, mark.rutland,
	Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies.
    
    This has now been reworked / uses the virt-dma layer.
    
    Changes inspired by OMAP DMA.
    
    Changes since v11:
    1. implement vchan support
    2. fill in residue
    
    Applies to next-20131206

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 764 +++++++++++++++++++++
 4 files changed, 793 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..697e3f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687c..36eb081 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -331,6 +331,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..d035142
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,764 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t fn;
+};
+
+struct moxart_desc {
+	struct dma_async_tx_descriptor	tx_desc;
+
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+struct moxart_dmadev {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static const unsigned es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	memcpy(&ch->cfg, cfg, sizeof(ch->cfg));
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned i, j = 0;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	} else {
+		dev_err(chan2dev(chan), "%s: unsupported direction\n",
+			__func__);
+		return NULL;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[j].addr = sg_dma_address(sgent);
+		d->sg[j].fn = sg_dma_len(sgent);
+		j++;
+	}
+
+	d->sglen = j;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_filter_data *fdata = param;
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
+		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
+
+	ch->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct moxart_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+static void moxart_dma_set_dest_params(struct moxart_chan *ch,
+				       dma_addr_t src_addr,
+				       dma_addr_t dest_addr)
+{
+	phys_addr_t phy_addr = virt_to_phys((void *)dest_addr);
+
+	writel(src_addr, ch->base + REG_ADDRESS_SOURCE);
+	writel(phy_addr, ch->base + REG_ADDRESS_DEST);
+}
+
+static void moxart_dma_set_src_params(struct moxart_chan *ch,
+				      dma_addr_t src_addr,
+				      dma_addr_t dest_addr)
+{
+	phys_addr_t phy_addr = virt_to_phys((void *)src_addr);
+
+	writel(phy_addr, ch->base + REG_ADDRESS_SOURCE);
+	writel(dest_addr, ch->base + REG_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when width is APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(d->dma_cycles, ch->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_src_params(ch, d->sg[0].addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_dest_params(ch, d->dev_addr, d->sg[0].addr);
+
+	moxart_set_transfer_params(ch, sg->fn);
+
+	moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->sglen; i++)
+		size += d->sg[i].fn;
+
+	return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	cycles = readl(ch->base + REG_CYCLES);
+	size = moxart_dma_desc_size(ch->desc);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%u\n", __func__, size);
+
+	return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status assigns initial residue value
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	} else {
+		txstate->residue = 0;
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		ret = DMA_ERROR;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				if (++ch->sgidx < ch->desc->sglen)
+					moxart_dma_start_sg(ch, ch->sgidx);
+				else {
+					vchan_cookie_complete(&ch->desc->vd);
+					ch->desc = NULL;
+				}
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v12] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-06 14:27                       ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-06 14:27 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies.
    
    This has now been reworked / uses the virt-dma layer.
    
    Changes inspired by OMAP DMA.
    
    Changes since v11:
    1. implement vchan support
    2. fill in residue
    
    Applies to next-20131206

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  20 +
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 764 +++++++++++++++++++++
 4 files changed, 793 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..697e3f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,20 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687c..36eb081 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -331,6 +331,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..d035142
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,764 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_ADDRESS_SOURCE			0
+#define REG_ADDRESS_DEST			4
+#define REG_CYCLES				8
+#define REG_CTRL				12
+#define REG_CHAN_SIZE				16
+
+#define APB_DMA_ENABLE				0x1
+#define APB_DMA_FIN_INT_STS			0x2
+#define APB_DMA_FIN_INT_EN			0x4
+#define APB_DMA_BURST_MODE			0x8
+#define APB_DMA_ERR_INT_STS			0x10
+#define APB_DMA_ERR_INT_EN			0x20
+
+/*
+ * unset to select APB source
+ * set to select AHB source
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset to select APB destination
+ * set to select AHB destination
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * request signal select of destination
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * data width of transfer
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+/*
+ * request signal select of source
+ * address for DMA hardware handshake
+ *
+ * the request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id == 5
+ *
+ * 0:    no request / grant signal
+ * 1-15: request / grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t fn;
+};
+
+struct moxart_desc {
+	struct dma_async_tx_descriptor	tx_desc;
+
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+struct moxart_dmadev {
+	int				ctlr;
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+	struct tasklet_struct		tasklet;
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static const unsigned es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	memcpy(&ch->cfg, cfg, sizeof(ch->cfg));
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned i, j = 0;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	} else {
+		dev_err(chan2dev(chan), "%s: unsupported direction\n",
+			__func__);
+		return NULL;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[j].addr = sg_dma_address(sgent);
+		d->sg[j].fn = sg_dma_len(sgent);
+		j++;
+	}
+
+	d->sglen = j;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_filter_data *fdata = param;
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
+		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
+
+	ch->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct moxart_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+static void moxart_dma_set_dest_params(struct moxart_chan *ch,
+				       dma_addr_t src_addr,
+				       dma_addr_t dest_addr)
+{
+	phys_addr_t phy_addr = virt_to_phys((void *)dest_addr);
+
+	writel(src_addr, ch->base + REG_ADDRESS_SOURCE);
+	writel(phy_addr, ch->base + REG_ADDRESS_DEST);
+}
+
+static void moxart_dma_set_src_params(struct moxart_chan *ch,
+				      dma_addr_t src_addr,
+				      dma_addr_t dest_addr)
+{
+	phys_addr_t phy_addr = virt_to_phys((void *)src_addr);
+
+	writel(phy_addr, ch->base + REG_ADDRESS_SOURCE);
+	writel(dest_addr, ch->base + REG_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * (when width is APB_DMA_DATA_WIDTH_4)
+	 */
+	writel(d->dma_cycles, ch->base + REG_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_src_params(ch, d->sg[0].addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_dest_params(ch, d->dev_addr, d->sg[0].addr);
+
+	moxart_set_transfer_params(ch, sg->fn);
+
+	moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->sglen; i++)
+		size += d->sg[i].fn;
+
+	return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	cycles = readl(ch->base + REG_CYCLES);
+	size = moxart_dma_desc_size(ch->desc);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%u\n", __func__, size);
+
+	return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status assigns initial residue value
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	} else {
+		txstate->residue = 0;
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		ret = DMA_ERROR;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				if (++ch->sgidx < ch->desc->sglen)
+					moxart_dma_start_sg(ch, ch->sgidx);
+				else {
+					vchan_cookie_complete(&ch->desc->vd);
+					ch->desc = NULL;
+				}
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr)) {
+		dev_err(dev, "devm_ioremap_resource failed\n");
+		return PTR_ERR(dma_base_addr);
+	}
+
+	mdc->ctlr = pdev->id;
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	tasklet_kill(&m->tasklet);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
  2013-12-06 14:27                       ` Jonas Jensen
@ 2013-12-11 15:13                         ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-11 15:13 UTC (permalink / raw)
  To: dmaengine
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, mark.rutland, andriy.shevchenko, lars, Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies!
    
    Changes since v12:
    1.  add description of dma specifier to DT binding document
    2.  remove linux/irq.h include
    3.  rename "fn" to "len" in struct moxart_sg
    4.  remove unused variable "tx_desc" (struct moxart_desc)
    5.  remove unused variable "ctrl" (struct moxart_dmadev)
    6.  remove unused tasklet "tasklet" (struct moxart_dmadev)
    7.  don't use memcpy() in moxart_slave_config(),
        assign directly "ch->cfg = *cfg"
    8.  remove redundant variable j in moxart_prep_slave_sg(),
        already provided as parameter "sg_len"
    9.  functions moxart_dma_set_*_params use virt_to_phys()
        but the address is never virtual, refactor these
        into one: moxart_dma_set_params()
    10. use local pointer "sg" in moxart_dma_start_sg()
    11. add spin_lock_irqsave()/spin_unlock_irqrestore() around
        calls that happen on interrupt status APB_DMA_FIN_INT_STS
    12. add _OFF suffix to REG_* defines
    13. use BIT() macro (APB_DMA_* defines)
    14. comment style formatting
    15. add is_slave_direction() and remove code under else (moxart_prep_slave_sg())
    16. print size_t using format "%zd" (moxart_dma_desc_size_in_flight())
    17. dma_cookie_status() assigns initial residue value to zero,
        remove code under else
    18. don't shadow dma_cookie_status() return value,
        directly return DMA_ERROR (moxart_tx_status())
    19. remove redundant dev_err() call in moxart_probe(),
        devm_ioremap_resource() has its own error messaging
    
    Applies to next-20131211

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 729 +++++++++++++++++++++
 4 files changed, 783 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 132a4fd..ca4fa6b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -334,6 +334,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..51749ac
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,729 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * unset: APB, set: AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset: APB, set: AHB
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination
+ * address for DMA hardware handshake.
+ *
+ * The request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id is 5.
+ *
+ * 0:    no request / grant signal
+ * 1-15: request    / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+/* One hardware scatter/gather segment of a transfer. */
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;			/* segment length in bytes */
+};
+
+/*
+ * Software descriptor for one slave transfer.  The trailing sg[]
+ * flexible array holds sglen segments and is allocated together with
+ * the descriptor in moxart_prep_slave_sg().
+ */
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;	/* fixed device-side address */
+	unsigned int			sglen;		/* number of entries in sg[] */
+	unsigned int			dma_cycles;	/* cycles programmed for current segment */
+	struct virt_dma_desc		vd;
+	uint8_t				es;		/* MOXART_DMA_DATA_TYPE_* element size */
+	struct moxart_sg		sg[0];
+};
+
+/* Per-channel state wrapping a virt_dma channel. */
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;		/* this channel's register window */
+	struct moxart_desc		*desc;		/* in-flight descriptor, NULL when idle */
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;	/* claimed by a client (IRQ handler skips others) */
+	bool				error;		/* set on APB_DMA_ERR_INT_STS */
+	int				ch_num;
+	unsigned int			line_reqno;	/* hardware handshake request line */
+	unsigned int			sgidx;		/* index of segment being transferred */
+};
+
+/* Controller instance: one dma_device with a fixed set of channels. */
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/* Passed to moxart_dma_filter_fn() during OF channel translation. */
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Element size in bytes for each MOXART_DMA_DATA_TYPE_* value. */
+static const unsigned es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* Return the struct device used for channel-scoped diagnostics. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Convert a generic dma_chan to its containing moxart_chan. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* Convert a dma_async_tx_descriptor to its containing moxart_desc. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt_dma desc_free callback: releases descriptors from prep_slave_sg(). */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * Abort all work on a channel: drop the in-flight descriptor, disable
+ * the channel and its interrupts, and free every queued descriptor.
+ * Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	/* Unconditional: assigning NULL needs no prior NULL check. */
+	ch->desc = NULL;
+
+	/* Stop the channel and mask completion/error interrupts. */
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	/* Collect queued descriptors under the lock, free them outside it. */
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * Configure a channel from a dma_slave_config: burst mode, transfer
+ * data width, address increments and bus (APB/AHB) selection.
+ *
+ * The control register is updated read-modify-write, so every field
+ * written below must be cleared first.  In particular the full
+ * APB_DMA_DATA_WIDTH_MASK (0x300000) must be cleared: clearing only
+ * APB_DMA_DATA_WIDTH (0x100000), as the 4-byte case previously did,
+ * leaves a stale 0x200000 bit behind when reconfiguring from a
+ * 1-byte to a wider transfer width.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+	ctrl &= ~APB_DMA_DATA_WIDTH_MASK;
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		/* APB_DMA_DATA_WIDTH_4 is 0; the field was cleared above. */
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * MEM_TO_DEV: source on AHB (memory), destination on APB (device);
+	 * DEV_TO_MEM is the reverse.  The request line is programmed on
+	 * the device-side endpoint.
+	 */
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch the generic control
+ * commands to the channel operations.  PAUSE/RESUME are not supported.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		/* arg carries a struct dma_slave_config * per dmaengine convention */
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * Build a slave transfer descriptor from a scatterlist.  The device
+ * address and element width come from the last dma_slave_config; each
+ * scatterlist entry becomes one moxart_sg segment.  Returns NULL on
+ * invalid direction, unsupported width or allocation failure.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es, i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	/* Select the device-side endpoint for this direction. */
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/* Descriptor plus trailing sg[] array; freed via moxart_dma_desc_free(). */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * dma_request_channel() filter callback used by moxart_of_xlate():
+ * reject channels belonging to another DMA device, and latch the
+ * request line number (first cell of the DT specifier) into the
+ * matched channel.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_filter_data *fdata = param;
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
+		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
+
+	ch->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/*
+ * OF translation callback (#dma-cells = 1): resolve a one-cell DT
+ * specifier (the request line number) to a channel of this controller.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct moxart_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/* Mark the channel claimed; the IRQ handler only services allocated channels. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/* Release virt_dma resources and mark the channel free again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/* Program source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/* Convert a segment length in bytes to hardware cycles and program it. */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	/*
+	 * NOTE(review): the shift count is the element size in BYTES
+	 * (1/2/4), which matches the 16-bytes-per-cycle example below
+	 * for 4-byte width (64 >> 4 == 4) — presumably burst mode moves
+	 * four elements per cycle; confirm against the datasheet.
+	 */
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when width is APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+/* Enable the channel together with its completion and error interrupts. */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/* Program and kick one scatterlist segment of the current descriptor. */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	/* Memory side comes from the segment, device side from the descriptor. */
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * Take the next queued virt_dma descriptor (if any), make it the
+ * in-flight descriptor and start its first segment.  Caller holds
+ * the channel's vc.lock.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/*
+ * dmaengine issue_pending callback: move submitted descriptors to the
+ * issued list and start the channel if it is currently idle.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/* Total byte count over all segments of a descriptor. */
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+/*
+ * Bytes not yet transferred of the in-flight descriptor, estimated
+ * from the remaining cycle count in REG_OFF_CYCLES.  Caller holds
+ * the channel's vc.lock and guarantees ch->desc is non-NULL.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	size = moxart_dma_desc_size(ch->desc);
+	/* Shift count mirrors moxart_set_transfer_params()'s cycle conversion. */
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zd\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * dmaengine tx_status callback.  Residue is the full descriptor size
+ * for still-queued cookies, or a hardware-derived estimate for the
+ * in-flight one; DMA_ERROR is reported if the channel saw an error
+ * interrupt.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status assigns initial residue value
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		/* Still queued: nothing transferred yet. */
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		/* Currently in flight: estimate from the cycle register. */
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/* Fill in the dmaengine callbacks and owning device for the dma_device. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared interrupt handler for all channels: poll every allocated
+ * channel, on completion advance to the next scatterlist segment or
+ * complete the in-flight descriptor, and record error interrupts in
+ * ch->error.  The control register is written back with the status
+ * bits cleared (NOTE(review): assumes write-0 acknowledges the
+ * status bits — confirm against the datasheet).
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				/* More segments left? Start the next one. */
+				if (++ch->sgidx < ch->desc->sglen)
+					moxart_dma_start_sg(ch, ch->sgidx);
+				else {
+					vchan_cookie_complete(&ch->desc->vd);
+					ch->desc = NULL;
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the controller registers, initialise the channels, hook
+ * the shared interrupt and register with dmaengine and the OF DMA core.
+ *
+ * Fixes vs. previous revision: dma_base_addr was declared `static`,
+ * making a per-device mapping into shared function-local state; and
+ * `irq <= 0` on an unsigned int only ever tests for zero, so the
+ * check is written as !irq (irq_of_parse_and_map() returns 0 on
+ * failure).
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register windows are laid out back to back. */
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	/* All channels share a single interrupt line. */
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/* Unregister from dmaengine and the OF DMA core; devm releases the rest. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+/* Device-tree match table: "moxa,moxart-dma". */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall rather than module_platform_driver,
+ * presumably so the DMA engine is available before client drivers
+ * probe — confirm intent with the author.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-11 15:13                         ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-11 15:13 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies!
    
    Changes since v12:
    1.  add description of dma specifier to DT binding document
    2.  remove linux/irq.h include
    3.  rename "fn" to "len" in struct moxart_sg
    4.  remove unused variable "tx_desc" (struct moxart_desc)
    5.  remove unused variable "ctrl" (struct moxart_dmadev)
    6.  remove unused tasklet "tasklet" (struct moxart_dmadev)
    7.  don't use memcpy() in moxart_slave_config(),
        assign directly "ch->cfg = *cfg"
    8.  remove redundant variable j in moxart_prep_slave_sg(),
        already provided as parameter "sg_len"
    9.  functions moxart_dma_set_*_params use virt_to_phys()
        but the address is never virtual, refactor these
        into one: moxart_dma_set_params()
    10. use local pointer "sg" in moxart_dma_start_sg()
    11. add spin_lock_irqsave()/spin_unlock_irqrestore() around
        calls that happen on interrupt status APB_DMA_FIN_INT_STS
    12. add _OFF suffix to REG_* defines
    13. use BIT() macro (APB_DMA_* defines)
    14. comment style formatting
    15. add is_slave_direction() and remove code under else (moxart_prep_slave_sg())
    16. print size_t using format "%zd" (moxart_dma_desc_size_in_flight())
    17. dma_cookie_status() assigns initial residue value to zero,
        remove code under else
    18. don't shadow dma_cookie_status() return value,
        directly return DMA_ERROR (moxart_tx_status())
    19. remove redundant dev_err() call in moxart_probe(),
        devm_ioremap_resource() has its own error messaging
    
    Applies to next-20131211

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 729 +++++++++++++++++++++
 4 files changed, 783 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma at 90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+A client must pass its specific request line number in the dma specifier.
+For example, the MMC request line is 5
+
+	sdhci: sdhci at 98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 132a4fd..ca4fa6b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -334,6 +334,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..51749ac
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,729 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * unset: APB, set: AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+
+/*
+ * unset: APB, set: AHB
+ */
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_SOURCE_MASK			0x700
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+
+#define APB_DMA_DEST				0x1000
+#define APB_DMA_DEST_MASK			0x7000
+/*
+ * 000: no increment
+ * 001: +1 (burst=0), +4  (burst=1)
+ * 010: +2 (burst=0), +8  (burst=1)
+ * 011: +4 (burst=0), +16 (burst=1)
+ * 101: -1 (burst=0), -4  (burst=1)
+ * 110: -2 (burst=0), -8  (burst=1)
+ * 111: -4 (burst=0), -16 (burst=1)
+*/
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination
+ * address for DMA hardware handshake.
+ *
+ * The request line number is a property of
+ * the DMA controller itself, e.g. MMC must
+ * always request channels where
+ * dma_slave_config->slave_id is 5.
+ *
+ * 0:    no request / grant signal
+ * 1-15: request    / grant signal
+ */
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: word
+ * 01: half
+ * 10: byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static const unsigned es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * Abort all work on a channel: drop the in-flight descriptor, disable
+ * the channel and its interrupts, and free every queued descriptor.
+ * Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	/* Unconditional: assigning NULL needs no prior NULL check. */
+	ch->desc = NULL;
+
+	/* Stop the channel and mask completion/error interrupts. */
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	/* Collect queued descriptors under the lock, free them outside it. */
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * Configure a channel from a dma_slave_config: burst mode, transfer
+ * data width, address increments and bus (APB/AHB) selection.
+ *
+ * The control register is updated read-modify-write, so every field
+ * written below must be cleared first.  In particular the full
+ * APB_DMA_DATA_WIDTH_MASK (0x300000) must be cleared: clearing only
+ * APB_DMA_DATA_WIDTH (0x100000), as the 4-byte case previously did,
+ * leaves a stale 0x200000 bit behind when reconfiguring from a
+ * 1-byte to a wider transfer width.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+	ctrl &= ~APB_DMA_DATA_WIDTH_MASK;
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		/* APB_DMA_DATA_WIDTH_4 is 0; the field was cleared above. */
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * MEM_TO_DEV: source on AHB (memory), destination on APB (device);
+	 * DEV_TO_MEM is the reverse.  The request line is programmed on
+	 * the device-side endpoint.
+	 */
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * moxart_prep_slave_sg - build a slave scatter/gather transfer descriptor.
+ *
+ * Validates the transfer direction and the bus width previously set via
+ * DMA_SLAVE_CONFIG, then copies each scatterlist entry's DMA address and
+ * length into a freshly allocated moxart_desc.  Returns NULL on invalid
+ * direction, unsupported width, or allocation failure.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es, i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	/* Device-side address and width come from the slave config. */
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	/* Map the dmaengine bus width onto the driver's element-size code. */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/*
+	 * Trailing sg[] array is sized for sg_len entries; GFP_ATOMIC
+	 * because prep callbacks may be invoked from atomic context.
+	 */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	/* Clear any error left over from a previous transfer. */
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * moxart_dma_filter_fn - dma_request_channel() filter callback.
+ *
+ * Accepts only channels belonging to the moxart_dmadev carried in
+ * @param whose device matches the OF node of the DT dma-spec.  On a
+ * match the first dma-spec cell is recorded as the channel's hardware
+ * request line number.
+ *
+ * NOTE(review): not static and no prototype in a header; review also
+ * suggested replacing this filter with dma_get_any_slave_channel()
+ * once available — confirm before merging.
+ */
+bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+	struct moxart_filter_data *fdata = param;
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
+	    chan->device->dev->of_node != fdata->dma_spec->np) {
+		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
+		return false;
+	}
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
+		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
+
+	ch->line_reqno = fdata->dma_spec->args[0];
+
+	return true;
+}
+
+/*
+ * moxart_of_xlate - OF translation callback registered with
+ * of_dma_controller_register().
+ *
+ * Requires at least one dma-spec cell (the request line number) and
+ * hands the spec to moxart_dma_filter_fn via dma_request_channel().
+ * Returns NULL if the spec has no cells or no channel matches.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct moxart_filter_data fdata = {
+		.mdc = mdc,
+	};
+
+	if (dma_spec->args_count < 1)
+		return NULL;
+
+	fdata.dma_spec = dma_spec;
+
+	return dma_request_channel(mdc->dma_slave.cap_mask,
+				   moxart_dma_filter_fn, &fdata);
+}
+
+/*
+ * moxart_alloc_chan_resources - mark the channel as in use.
+ *
+ * The hardware needs no per-channel allocation; the flag gates
+ * interrupt handling for this channel in moxart_dma_interrupt().
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/*
+ * moxart_free_chan_resources - release the virt-dma channel state and
+ * mark the channel free so the interrupt handler skips it.
+ */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/*
+ * moxart_dma_set_params - program the source and destination addresses
+ * into the channel's register block.
+ */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/*
+ * moxart_set_transfer_params - program the cycle (transfer) count for
+ * the current descriptor's element size and the given length in bytes.
+ *
+ * NOTE(review): es_bytes[] holds byte counts (1/2/4) but is used here
+ * as a shift amount.  For 4-byte width, len >> 4 matches the burst
+ * comment below (16 bytes per cycle), but the 1- and 2-byte cases look
+ * suspect — verify against the APB DMA hardware documentation.
+ */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
+	 * ( when width is APB_DMAB_DATA_WIDTH_4 )
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+/*
+ * moxart_start_dma - enable the channel and unmask its completion and
+ * error interrupts by a read-modify-write of the control register.
+ */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/*
+ * moxart_dma_start_sg - program and kick off transfer of scatterlist
+ * entry @idx of the channel's current descriptor.
+ *
+ * Source/destination ordering follows the descriptor's direction:
+ * memory address from the sg entry, device address from the config.
+ * Callers in this file invoke it under ch->vc.lock.
+ */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * moxart_dma_start_desc - dequeue the next issued virt-dma descriptor
+ * and start its first scatterlist entry; clears ch->desc when the
+ * queue is empty.  Called with ch->vc.lock held.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	/* Take ownership: remove from the issued list before starting. */
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/*
+ * moxart_issue_pending - dmaengine device_issue_pending callback.
+ *
+ * Moves submitted descriptors to the issued list and, if the channel
+ * is idle (no descriptor in flight), starts the next one.
+ */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/*
+ * moxart_dma_desc_size - total byte count of a descriptor, i.e. the
+ * sum of all its scatterlist entry lengths.
+ */
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+/*
+ * moxart_dma_desc_size_in_flight - residue estimate for the in-flight
+ * descriptor: total size minus bytes covered by already-completed DMA
+ * cycles, derived from the live cycles register.
+ *
+ * NOTE(review): completed_cycles is shifted by es_bytes[] (1/2/4),
+ * which are byte counts, not log2 shift amounts — same concern as in
+ * moxart_set_transfer_params(); verify against the hardware docs.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	size = moxart_dma_desc_size(ch->desc);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zd\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * moxart_tx_status - dmaengine device_tx_status callback.
+ *
+ * Uses dma_cookie_status() for the base status, then refines the
+ * residue: full descriptor size for still-queued cookies, or a live
+ * estimate from the hardware for the in-flight one.
+ *
+ * NOTE(review): a sticky ch->error makes this return DMA_ERROR for
+ * every cookie, including ones that completed fine — confirm intended.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status assigns initial residue value
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/*
+ * moxart_dma_init - fill in the dma_device callback table and channel
+ * list prior to registration with the dmaengine core.
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * moxart_dma_interrupt - shared IRQ handler for all four channels.
+ *
+ * For each allocated channel, reads the control register, acks the
+ * completion/error status bits, advances to the next sg entry or
+ * completes the descriptor, and writes the register back with the
+ * handled status bits cleared.
+ *
+ * NOTE(review): ch->desc is tested before ch->vc.lock is taken —
+ * confirm this cannot race with moxart_terminate_all().
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen)
+					moxart_dma_start_sg(ch, ch->sgidx);
+				else {
+					vchan_cookie_complete(&ch->desc->vd);
+					ch->desc = NULL;
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		/* Write back with the serviced status bits cleared. */
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * moxart_probe - platform driver probe.
+ *
+ * Maps the register block, initializes the four channels, requests the
+ * shared IRQ, and registers the dma_device with both the dmaengine
+ * core and the OF DMA helper layer.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/*
+	 * NOTE(review): 'static' makes this a single variable shared by
+	 * every probe call; a plain local would be correct and safe if
+	 * more than one controller instance ever exists.
+	 */
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	/*
+	 * NOTE(review): irq is unsigned, so '<= 0' can only catch 0;
+	 * compare against 0 (or make irq an int).
+	 */
+	if (irq <= 0) {
+		dev_err(dev, "irq_of_parse_and_map failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channels share one register block, REG_OFF_CHAN_SIZE apart. */
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/*
+ * moxart_remove - platform driver remove; undoes the registrations
+ * from probe (devm handles the IRQ and mappings).
+ *
+ * NOTE(review): the OF DMA controller is freed after the dma device
+ * is unregistered; the conventional order is the reverse, so no new
+ * channel requests can arrive during teardown — verify.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+/*
+ * Device-tree match table.
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, moxart_dma_match), so
+ * module autoloading on this compatible will not work — confirm
+ * whether that is intended (driver is also registered at subsys
+ * initcall time).
+ */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+/* Platform driver glue; matched via DT (moxart_dma_match) or by name. */
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time (rather than module_init) so the
+ * DMA engine is available before client drivers probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
  2013-12-11 15:13                         ` Jonas Jensen
@ 2013-12-11 21:27                           ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-12-11 21:27 UTC (permalink / raw)
  To: linux-arm-kernel, Stephen Warren
  Cc: Jonas Jensen, dmaengine, mark.rutland, lars, linux, vinod.koul,
	linux-kernel, arm, djbw, andriy.shevchenko

I didn't comment on this earlier since you had already gone through 12 revisions
and I didn't want to hold up merging any longer. This can be done as a follow-up,
or you can include it if you end up doing a v14:

On Wednesday 11 December 2013, Jonas Jensen wrote:

> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +       struct moxart_filter_data *fdata = param;
> +       struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +       if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +           chan->device->dev->of_node != fdata->dma_spec->np) {
> +               dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +               return false;
> +       }
> +
> +       dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
> +               __func__, ch, fdata->dma_spec->args[0], ch->ch_num);
> +
> +       ch->line_reqno = fdata->dma_spec->args[0];
> +
> +       return true;
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +                                       struct of_dma *ofdma)
> +{
> +       struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +       struct moxart_filter_data fdata = {
> +               .mdc = mdc,
> +       };
> +
> +       if (dma_spec->args_count < 1)
> +               return NULL;
> +
> +       fdata.dma_spec = dma_spec;
> +
> +       return dma_request_channel(mdc->dma_slave.cap_mask,
> +                                  moxart_dma_filter_fn, &fdata);
> +}

The moxart_dma_filter_fn should get removed and the moxart_of_xlate() rewritten
based on Stephen Warren's dma_get_any_slave_channel() interface once that
is available in the dmaengine git tree.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-11 21:27                           ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2013-12-11 21:27 UTC (permalink / raw)
  To: linux-arm-kernel

I didn't comment on this earlier since you had already gone through 12 revisions
and I didn't want to hold up merging any longer. This can be done as a follow-up,
or you can include it if you end up doing a v14:

On Wednesday 11 December 2013, Jonas Jensen wrote:

> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +       struct moxart_filter_data *fdata = param;
> +       struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +       if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +           chan->device->dev->of_node != fdata->dma_spec->np) {
> +               dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +               return false;
> +       }
> +
> +       dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
> +               __func__, ch, fdata->dma_spec->args[0], ch->ch_num);
> +
> +       ch->line_reqno = fdata->dma_spec->args[0];
> +
> +       return true;
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +                                       struct of_dma *ofdma)
> +{
> +       struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +       struct moxart_filter_data fdata = {
> +               .mdc = mdc,
> +       };
> +
> +       if (dma_spec->args_count < 1)
> +               return NULL;
> +
> +       fdata.dma_spec = dma_spec;
> +
> +       return dma_request_channel(mdc->dma_slave.cap_mask,
> +                                  moxart_dma_filter_fn, &fdata);
> +}

The moxart_dma_filter_fn should get removed and the moxart_of_xlate() rewritten
based on Stephen Warren's dma_get_any_slave_channel() interface once that
is available in the dmaengine git tree.

	Arnd

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
  2013-12-11 15:13                         ` Jonas Jensen
@ 2013-12-12  9:16                           ` Andy Shevchenko
  -1 siblings, 0 replies; 80+ messages in thread
From: Andy Shevchenko @ 2013-12-12  9:16 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: dmaengine, linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw,
	arnd, linux, mark.rutland, lars

On Wed, 2013-12-11 at 16:13 +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 

Thanks for an update.

Since you will have a next version anyway (addressing Arnd's comment), I
add my few minor comments below.

[]

> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,729 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +#include <linux/bitops.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +#include "virt-dma.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_OFF_ADDRESS_SOURCE			0
> +#define REG_OFF_ADDRESS_DEST			4
> +#define REG_OFF_CYCLES				8
> +#define REG_OFF_CTRL				12
> +#define REG_OFF_CHAN_SIZE			16
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +


> +/*
> + * unset: APB, set: AHB
> + */

One line?

> +#define APB_DMA_SOURCE_SELECT			0x40
> +
> +/*
> + * unset: APB, set: AHB
> + */

Ditto.

> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_SOURCE_MASK			0x700
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +
> +#define APB_DMA_DEST				0x1000
> +#define APB_DMA_DEST_MASK			0x7000
> +/*
> + * 000: no increment
> + * 001: +1 (busrt=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> +*/
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * Request signal select source/destination
> + * address for DMA hardware handshake.
> + *
> + * The request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id is 5.

For me it seems that the text is too squeezed here. A width of 72-76
characters would read better.

> + *
> + * 0:    no request / grant signal
> + * 1-15: request    / grant signal
> + */
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * Data width of transfer:

Might be better to keep style across comments. I mean capital letter,
commas, dots, etc.

Check entire code for this.

> + *
> + * 00: word
> + * 01: half
> + * 10: byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +#define MOXART_DMA_DATA_TYPE_S8			0x00
> +#define MOXART_DMA_DATA_TYPE_S16		0x01
> +#define MOXART_DMA_DATA_TYPE_S32		0x02
> +
> +struct moxart_sg {
> +	dma_addr_t addr;
> +	uint32_t len;
> +};
> +
> +struct moxart_desc {
> +	enum dma_transfer_direction	dma_dir;
> +	dma_addr_t			dev_addr;
> +	unsigned int			sglen;
> +	unsigned int			dma_cycles;
> +	struct virt_dma_desc		vd;
> +	uint8_t				es;
> +	struct moxart_sg		sg[0];
> +};
> +
> +struct moxart_chan {
> +	struct virt_dma_chan		vc;
> +
> +	void __iomem			*base;
> +	struct moxart_desc		*desc;
> +
> +	struct dma_slave_config		cfg;
> +
> +	bool				allocated;
> +	bool				error;
> +	int				ch_num;
> +	unsigned int			line_reqno;
> +	unsigned int			sgidx;
> +};
> +
> +struct moxart_dmadev {
> +	struct dma_device		dma_slave;
> +	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_filter_data {
> +	struct moxart_dmadev		*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static const unsigned es_bytes[] = {
> +	[MOXART_DMA_DATA_TYPE_S8] = 1,
> +	[MOXART_DMA_DATA_TYPE_S16] = 2,
> +	[MOXART_DMA_DATA_TYPE_S32] = 4,
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_chan, vc.chan);
> +}
> +
> +static inline struct moxart_desc *to_moxart_dma_desc(
> +	struct dma_async_tx_descriptor *t)
> +{
> +	return container_of(t, struct moxart_desc, vd.tx);
> +}
> +
> +static void moxart_dma_desc_free(struct virt_dma_desc *vd)
> +{
> +	kfree(container_of(vd, struct moxart_desc, vd));
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +
> +	if (ch->desc)
> +		ch->desc = NULL;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	vchan_get_all_descriptors(&ch->vc, &head);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +	vchan_dma_desc_free_list(&ch->vc, &head);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	u32 ctrl;
> +
> +	ch->cfg = *cfg;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (ch->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +
> +	switch (cmd) {
> +	case DMA_PAUSE:
> +	case DMA_RESUME:
> +		return -EINVAL;
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
> +	struct dma_chan *chan, struct scatterlist *sgl,
> +	unsigned int sg_len, enum dma_transfer_direction dir,
> +	unsigned long tx_flags, void *context)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_desc *d;
> +	enum dma_slave_buswidth dev_width;
> +	dma_addr_t dev_addr;
> +	struct scatterlist *sgent;

> +	unsigned int es, i;

Could you split to two lines since the variables are from different
classes of application?

> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (dir == DMA_DEV_TO_MEM) {
> +		dev_addr = ch->cfg.src_addr;
> +		dev_width = ch->cfg.src_addr_width;
> +	} else {
> +		dev_addr = ch->cfg.dst_addr;
> +		dev_width = ch->cfg.dst_addr_width;
> +	}
> +
> +	switch (dev_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		es = MOXART_DMA_DATA_TYPE_S8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S16;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S32;
> +		break;
> +	default:
> +		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
> +			__func__, dev_width);
> +		return NULL;
> +	}
> +
> +	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);



> +	if (!d)
> +		return NULL;
> +
> +	d->dma_dir = dir;
> +	d->dev_addr = dev_addr;
> +	d->es = es;
> +
> +	for_each_sg(sgl, sgent, sg_len, i) {
> +		d->sg[i].addr = sg_dma_address(sgent);
> +		d->sg[i].len = sg_dma_len(sgent);
> +	}
> +
> +	d->sglen = sg_len;
> +
> +	ch->error = 0;
> +
> +	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
> +}
> +
> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_filter_data *fdata = param;
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +	    chan->device->dev->of_node != fdata->dma_spec->np) {
> +		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +		return false;
> +	}
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
> +		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
> +
> +	ch->line_reqno = fdata->dma_spec->args[0];
> +
> +	return true;
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +	struct moxart_filter_data fdata = {
> +		.mdc = mdc,
> +	};
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	fdata.dma_spec = dma_spec;
> +
> +	return dma_request_channel(mdc->dma_slave.cap_mask,
> +				   moxart_dma_filter_fn, &fdata);
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	vchan_free_chan_resources(&ch->vc);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 0;
> +}
> +
> +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
> +				  dma_addr_t dst_addr)
> +{
> +	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
> +	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
> +}
> +
> +static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	unsigned int sglen_div = es_bytes[d->es];
> +
> +	d->dma_cycles = len >> sglen_div;
> +
> +	/*
> +	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
> +	 * ( when width is APB_DMAB_DATA_WIDTH_4 )
> +	 */
> +	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
> +		__func__, d->dma_cycles, len);
> +}
> +
> +static void moxart_start_dma(struct moxart_chan *ch)
> +{
> +	u32 ctrl;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +}
> +
> +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	struct moxart_sg *sg = ch->desc->sg + idx;
> +
> +	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
> +		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
> +	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
> +		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
> +
> +	moxart_set_transfer_params(ch, sg->len);
> +
> +	moxart_start_dma(ch);
> +}
> +
> +static void moxart_dma_start_desc(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +
> +	vd = vchan_next_desc(&ch->vc);
> +
> +	if (!vd) {
> +		ch->desc = NULL;
> +		return;
> +	}
> +
> +	list_del(&vd->node);
> +
> +	ch->desc = to_moxart_dma_desc(&vd->tx);
> +	ch->sgidx = 0;
> +
> +	moxart_dma_start_sg(ch, 0);
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	if (vchan_issue_pending(&ch->vc) && !ch->desc)
> +		moxart_dma_start_desc(chan);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +}
> +
> +static size_t moxart_dma_desc_size(struct moxart_desc *d)
> +{
> +	unsigned i;
> +	size_t size;
> +
> +	for (size = i = 0; i < d->sglen; i++)
> +		size += d->sg[i].len;
> +
> +	return size;
> +}
> +
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles;
> +
> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	size = moxart_dma_desc_size(ch->desc);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zd\n", __func__, size);

Sorry, I was a bit inaccurate here.

ssize_t (signed type) -> %zd
size_t (unsigned type) -> %zu

Please, fix this.

> +
> +	return size;
> +}
> +
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txstate)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +	struct moxart_desc *d;
> +	enum dma_status ret;
> +	unsigned long flags;
> +
> +	/*
> +	 * dma_cookie_status assigns initial residue value
> +	 */
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	vd = vchan_find_desc(&ch->vc, cookie);
> +	if (vd) {
> +		d = to_moxart_dma_desc(&vd->tx);
> +		txstate->residue = moxart_dma_desc_size(d);
> +	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
> +		txstate->residue = moxart_dma_desc_size_in_flight(ch);
> +	}
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +
> +	if (ch->error)
> +		return DMA_ERROR;
> +
> +	return ret;
> +}
> +
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
> +	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources		= moxart_free_chan_resources;
> +	dma->device_issue_pending		= moxart_issue_pending;
> +	dma->device_tx_status			= moxart_tx_status;
> +	dma->device_control			= moxart_control;
> +	dma->dev				= dev;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dmadev *mc = devid;
> +	struct moxart_chan *ch = &mc->slave_chans[0];
> +	unsigned int i;
> +	unsigned long flags;
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (!ch->allocated)
> +			continue;
> +
> +		ctrl = readl(ch->base + REG_OFF_CTRL);
> +
> +		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
> +			__func__, ch, ch->base, ctrl);
> +
> +		if (ctrl & APB_DMA_FIN_INT_STS) {
> +			ctrl &= ~APB_DMA_FIN_INT_STS;
> +			if (ch->desc) {
> +				spin_lock_irqsave(&ch->vc.lock, flags);
> +				if (++ch->sgidx < ch->desc->sglen)
> +					moxart_dma_start_sg(ch, ch->sgidx);


> +				else {

} else {

If you use {} in one branch you have to use it in the other accordingly
to coding style.

> +					vchan_cookie_complete(&ch->desc->vd);
> +					ch->desc = NULL;
> +				}
> +				spin_unlock_irqrestore(&ch->vc.lock, flags);
> +			}
> +		}
> +
> +		if (ctrl & APB_DMA_ERR_INT_STS) {
> +			ctrl &= ~APB_DMA_ERR_INT_STS;
> +			ch->error = 1;
> +		}
> +
> +		writel(ctrl, ch->base + REG_OFF_CTRL);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct device_node *node = dev->of_node;
> +	struct resource *res;
> +	static void __iomem *dma_base_addr;
> +	int ret, i;
> +	unsigned int irq;
> +	struct moxart_chan *ch;
> +	struct moxart_dmadev *mdc;
> +
> +	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +	if (!mdc) {
> +		dev_err(dev, "can't allocate DMA container\n");
> +		return -ENOMEM;
> +	}
> +
> +	irq = irq_of_parse_and_map(node, 0);
> +	if (irq <= 0) {

This one is not correct. It will never be negative (see the variable
type). IIUC, you have to check against 0 only.

> +		dev_err(dev, "irq_of_parse_and_map failed\n");
> +		return -EINVAL;
> +	}
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dma_base_addr = devm_ioremap_resource(dev, res);
> +	if (IS_ERR(dma_base_addr))
> +		return PTR_ERR(dma_base_addr);
> +
> +	dma_cap_zero(mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +	moxart_dma_init(&mdc->dma_slave, dev);
> +
> +	ch = &mdc->slave_chans[0];
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		ch->ch_num = i;
> +		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
> +		ch->allocated = 0;
> +
> +		ch->vc.desc_free = moxart_dma_desc_free;
> +		vchan_init(&ch->vc, &mdc->dma_slave);
> +
> +		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
> +			__func__, i, ch->ch_num, ch->base);
> +	}
> +
> +	platform_set_drvdata(pdev, mdc);
> +
> +	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +			       "moxart-dma-engine", mdc);
> +	if (ret) {
> +		dev_err(dev, "devm_request_irq failed\n");
> +		return ret;
> +	}
> +
> +	ret = dma_async_device_register(&mdc->dma_slave);
> +	if (ret) {
> +		dev_err(dev, "dma_async_device_register failed\n");
> +		return ret;
> +	}
> +
> +	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +	if (ret) {
> +		dev_err(dev, "of_dma_controller_register failed\n");
> +		dma_async_device_unregister(&mdc->dma_slave);
> +		return ret;
> +	}
> +
> +	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +	return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +	struct moxart_dmadev *m = platform_get_drvdata(pdev);
> +
> +	dma_async_device_unregister(&m->dma_slave);
> +
> +	if (pdev->dev.of_node)
> +		of_dma_controller_free(pdev->dev.of_node);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id moxart_dma_match[] = {
> +	{ .compatible = "moxa,moxart-dma" },
> +	{ }
> +};
> +
> +static struct platform_driver moxart_driver = {
> +	.probe	= moxart_probe,
> +	.remove	= moxart_remove,
> +	.driver = {
> +		.name		= "moxart-dma-engine",
> +		.owner		= THIS_MODULE,
> +		.of_match_table	= moxart_dma_match,
> +	},
> +};
> +
> +static int moxart_init(void)
> +{
> +	return platform_driver_register(&moxart_driver);
> +}
> +subsys_initcall(moxart_init);
> +
> +static void __exit moxart_exit(void)
> +{
> +	platform_driver_unregister(&moxart_driver);
> +}
> +module_exit(moxart_exit);
> +
> +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
> +MODULE_DESCRIPTION("MOXART DMA engine driver");
> +MODULE_LICENSE("GPL v2");

-- 
Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Intel Finland Oy


^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v13] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-12  9:16                           ` Andy Shevchenko
  0 siblings, 0 replies; 80+ messages in thread
From: Andy Shevchenko @ 2013-12-12  9:16 UTC (permalink / raw)
  To: linux-arm-kernel

On Wed, 2013-12-11 at 16:13 +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 

Thanks for an update.

Since you will have next version anyway (addressing Arnd's comment) I
add my few minor comments below.

[]

> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,729 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +#include <linux/bitops.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +#include "virt-dma.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_OFF_ADDRESS_SOURCE			0
> +#define REG_OFF_ADDRESS_DEST			4
> +#define REG_OFF_CYCLES				8
> +#define REG_OFF_CTRL				12
> +#define REG_OFF_CHAN_SIZE			16
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +


> +/*
> + * unset: APB, set: AHB
> + */

One line?

> +#define APB_DMA_SOURCE_SELECT			0x40
> +
> +/*
> + * unset: APB, set: AHB
> + */

Ditto.

> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_SOURCE_MASK			0x700
> +/*
> + * 000: no increment
> + * 001: +1 (burst=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +
> +#define APB_DMA_DEST				0x1000
> +#define APB_DMA_DEST_MASK			0x7000
> +/*
> + * 000: no increment
> + * 001: +1 (burst=0), +4  (burst=1)
> + * 010: +2 (burst=0), +8  (burst=1)
> + * 011: +4 (burst=0), +16 (burst=1)
> + * 101: -1 (burst=0), -4  (burst=1)
> + * 110: -2 (burst=0), -8  (burst=1)
> + * 111: -4 (burst=0), -16 (burst=1)
> + */
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * Request signal select source/destination
> + * address for DMA hardware handshake.
> + *
> + * The request line number is a property of
> + * the DMA controller itself, e.g. MMC must
> + * always request channels where
> + * dma_slave_config->slave_id is 5.

To me it seems the text is too squeezed here. A width of 72-76 characters
would read better.

> + *
> + * 0:    no request / grant signal
> + * 1-15: request    / grant signal
> + */
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * Data width of transfer:

Might be better to keep style across comments. I mean capital letter,
commas, dots, etc.

Check entire code for this.

> + *
> + * 00: word
> + * 01: half
> + * 10: byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +#define MOXART_DMA_DATA_TYPE_S8			0x00
> +#define MOXART_DMA_DATA_TYPE_S16		0x01
> +#define MOXART_DMA_DATA_TYPE_S32		0x02
> +
> +struct moxart_sg {
> +	dma_addr_t addr;
> +	uint32_t len;
> +};
> +
> +struct moxart_desc {
> +	enum dma_transfer_direction	dma_dir;
> +	dma_addr_t			dev_addr;
> +	unsigned int			sglen;
> +	unsigned int			dma_cycles;
> +	struct virt_dma_desc		vd;
> +	uint8_t				es;
> +	struct moxart_sg		sg[0];
> +};
> +
> +struct moxart_chan {
> +	struct virt_dma_chan		vc;
> +
> +	void __iomem			*base;
> +	struct moxart_desc		*desc;
> +
> +	struct dma_slave_config		cfg;
> +
> +	bool				allocated;
> +	bool				error;
> +	int				ch_num;
> +	unsigned int			line_reqno;
> +	unsigned int			sgidx;
> +};
> +
> +struct moxart_dmadev {
> +	struct dma_device		dma_slave;
> +	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_filter_data {
> +	struct moxart_dmadev		*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static const unsigned es_bytes[] = {
> +	[MOXART_DMA_DATA_TYPE_S8] = 1,
> +	[MOXART_DMA_DATA_TYPE_S16] = 2,
> +	[MOXART_DMA_DATA_TYPE_S32] = 4,
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_chan, vc.chan);
> +}
> +
> +static inline struct moxart_desc *to_moxart_dma_desc(
> +	struct dma_async_tx_descriptor *t)
> +{
> +	return container_of(t, struct moxart_desc, vd.tx);
> +}
> +
> +static void moxart_dma_desc_free(struct virt_dma_desc *vd)
> +{
> +	kfree(container_of(vd, struct moxart_desc, vd));
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +
> +	if (ch->desc)
> +		ch->desc = NULL;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	vchan_get_all_descriptors(&ch->vc, &head);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +	vchan_dma_desc_free_list(&ch->vc, &head);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	u32 ctrl;
> +
> +	ch->cfg = *cfg;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (ch->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +
> +	switch (cmd) {
> +	case DMA_PAUSE:
> +	case DMA_RESUME:
> +		return -EINVAL;
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
> +	struct dma_chan *chan, struct scatterlist *sgl,
> +	unsigned int sg_len, enum dma_transfer_direction dir,
> +	unsigned long tx_flags, void *context)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_desc *d;
> +	enum dma_slave_buswidth dev_width;
> +	dma_addr_t dev_addr;
> +	struct scatterlist *sgent;

> +	unsigned int es, i;

Could you split this into two lines, since the variables belong to
different classes of application?

> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (dir == DMA_DEV_TO_MEM) {
> +		dev_addr = ch->cfg.src_addr;
> +		dev_width = ch->cfg.src_addr_width;
> +	} else {
> +		dev_addr = ch->cfg.dst_addr;
> +		dev_width = ch->cfg.dst_addr_width;
> +	}
> +
> +	switch (dev_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		es = MOXART_DMA_DATA_TYPE_S8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S16;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S32;
> +		break;
> +	default:
> +		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
> +			__func__, dev_width);
> +		return NULL;
> +	}
> +
> +	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);



> +	if (!d)
> +		return NULL;
> +
> +	d->dma_dir = dir;
> +	d->dev_addr = dev_addr;
> +	d->es = es;
> +
> +	for_each_sg(sgl, sgent, sg_len, i) {
> +		d->sg[i].addr = sg_dma_address(sgent);
> +		d->sg[i].len = sg_dma_len(sgent);
> +	}
> +
> +	d->sglen = sg_len;
> +
> +	ch->error = 0;
> +
> +	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
> +}
> +
> +bool moxart_dma_filter_fn(struct dma_chan *chan, void *param)
> +{
> +	struct moxart_filter_data *fdata = param;
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	if (chan->device->dev != fdata->mdc->dma_slave.dev ||
> +	    chan->device->dev->of_node != fdata->dma_spec->np) {
> +		dev_dbg(chan2dev(chan), "device not registered to this DMA engine\n");
> +		return false;
> +	}
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p line_reqno=%u ch->ch_num=%u\n",
> +		__func__, ch, fdata->dma_spec->args[0], ch->ch_num);
> +
> +	ch->line_reqno = fdata->dma_spec->args[0];
> +
> +	return true;
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +	struct moxart_filter_data fdata = {
> +		.mdc = mdc,
> +	};
> +
> +	if (dma_spec->args_count < 1)
> +		return NULL;
> +
> +	fdata.dma_spec = dma_spec;
> +
> +	return dma_request_channel(mdc->dma_slave.cap_mask,
> +				   moxart_dma_filter_fn, &fdata);
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	vchan_free_chan_resources(&ch->vc);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 0;
> +}
> +
> +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
> +				  dma_addr_t dst_addr)
> +{
> +	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
> +	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
> +}
> +
> +static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	unsigned int sglen_div = es_bytes[d->es];
> +
> +	d->dma_cycles = len >> sglen_div;
> +
> +	/*
> +	 * cycles is 4 on 64 bytes copied, i.e. one cycle copies 16 bytes
> +	 * (when width is APB_DMA_DATA_WIDTH_4)
> +	 */
> +	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
> +		__func__, d->dma_cycles, len);
> +}
> +
> +static void moxart_start_dma(struct moxart_chan *ch)
> +{
> +	u32 ctrl;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +}
> +
> +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	struct moxart_sg *sg = ch->desc->sg + idx;
> +
> +	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
> +		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
> +	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
> +		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
> +
> +	moxart_set_transfer_params(ch, sg->len);
> +
> +	moxart_start_dma(ch);
> +}
> +
> +static void moxart_dma_start_desc(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +
> +	vd = vchan_next_desc(&ch->vc);
> +
> +	if (!vd) {
> +		ch->desc = NULL;
> +		return;
> +	}
> +
> +	list_del(&vd->node);
> +
> +	ch->desc = to_moxart_dma_desc(&vd->tx);
> +	ch->sgidx = 0;
> +
> +	moxart_dma_start_sg(ch, 0);
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	if (vchan_issue_pending(&ch->vc) && !ch->desc)
> +		moxart_dma_start_desc(chan);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +}
> +
> +static size_t moxart_dma_desc_size(struct moxart_desc *d)
> +{
> +	unsigned i;
> +	size_t size;
> +
> +	for (size = i = 0; i < d->sglen; i++)
> +		size += d->sg[i].len;
> +
> +	return size;
> +}
> +
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles;
> +
> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	size = moxart_dma_desc_size(ch->desc);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zd\n", __func__, size);

Sorry, I was a bit inaccurate here.

ssize_t (signed type) -> %zd
size_t (unsigned type) -> %zu

Please, fix this.

> +
> +	return size;
> +}
> +
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txstate)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +	struct moxart_desc *d;
> +	enum dma_status ret;
> +	unsigned long flags;
> +
> +	/*
> +	 * dma_cookie_status assigns initial residue value
> +	 */
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	vd = vchan_find_desc(&ch->vc, cookie);
> +	if (vd) {
> +		d = to_moxart_dma_desc(&vd->tx);
> +		txstate->residue = moxart_dma_desc_size(d);
> +	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
> +		txstate->residue = moxart_dma_desc_size_in_flight(ch);
> +	}
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +
> +	if (ch->error)
> +		return DMA_ERROR;
> +
> +	return ret;
> +}
> +
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
> +	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources		= moxart_free_chan_resources;
> +	dma->device_issue_pending		= moxart_issue_pending;
> +	dma->device_tx_status			= moxart_tx_status;
> +	dma->device_control			= moxart_control;
> +	dma->dev				= dev;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dmadev *mc = devid;
> +	struct moxart_chan *ch = &mc->slave_chans[0];
> +	unsigned int i;
> +	unsigned long flags;
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (!ch->allocated)
> +			continue;
> +
> +		ctrl = readl(ch->base + REG_OFF_CTRL);
> +
> +		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
> +			__func__, ch, ch->base, ctrl);
> +
> +		if (ctrl & APB_DMA_FIN_INT_STS) {
> +			ctrl &= ~APB_DMA_FIN_INT_STS;
> +			if (ch->desc) {
> +				spin_lock_irqsave(&ch->vc.lock, flags);
> +				if (++ch->sgidx < ch->desc->sglen)
> +					moxart_dma_start_sg(ch, ch->sgidx);


> +				else {

} else {

If you use {} in one branch you have to use it in the other as well,
according to the coding style.

> +					vchan_cookie_complete(&ch->desc->vd);
> +					ch->desc = NULL;
> +				}
> +				spin_unlock_irqrestore(&ch->vc.lock, flags);
> +			}
> +		}
> +
> +		if (ctrl & APB_DMA_ERR_INT_STS) {
> +			ctrl &= ~APB_DMA_ERR_INT_STS;
> +			ch->error = 1;
> +		}
> +
> +		writel(ctrl, ch->base + REG_OFF_CTRL);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct device_node *node = dev->of_node;
> +	struct resource *res;
> +	static void __iomem *dma_base_addr;
> +	int ret, i;
> +	unsigned int irq;
> +	struct moxart_chan *ch;
> +	struct moxart_dmadev *mdc;
> +
> +	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +	if (!mdc) {
> +		dev_err(dev, "can't allocate DMA container\n");
> +		return -ENOMEM;
> +	}
> +
> +	irq = irq_of_parse_and_map(node, 0);
> +	if (irq <= 0) {

This one is not correct. It will never be negative (see the variable
type). IIUC, you have to check against 0 only.

> +		dev_err(dev, "irq_of_parse_and_map failed\n");
> +		return -EINVAL;
> +	}
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dma_base_addr = devm_ioremap_resource(dev, res);
> +	if (IS_ERR(dma_base_addr))
> +		return PTR_ERR(dma_base_addr);
> +
> +	dma_cap_zero(mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +
> +	moxart_dma_init(&mdc->dma_slave, dev);
> +
> +	ch = &mdc->slave_chans[0];
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		ch->ch_num = i;
> +		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
> +		ch->allocated = 0;
> +
> +		ch->vc.desc_free = moxart_dma_desc_free;
> +		vchan_init(&ch->vc, &mdc->dma_slave);
> +
> +		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
> +			__func__, i, ch->ch_num, ch->base);
> +	}
> +
> +	platform_set_drvdata(pdev, mdc);
> +
> +	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +			       "moxart-dma-engine", mdc);
> +	if (ret) {
> +		dev_err(dev, "devm_request_irq failed\n");
> +		return ret;
> +	}
> +
> +	ret = dma_async_device_register(&mdc->dma_slave);
> +	if (ret) {
> +		dev_err(dev, "dma_async_device_register failed\n");
> +		return ret;
> +	}
> +
> +	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +	if (ret) {
> +		dev_err(dev, "of_dma_controller_register failed\n");
> +		dma_async_device_unregister(&mdc->dma_slave);
> +		return ret;
> +	}
> +
> +	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +	return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +	struct moxart_dmadev *m = platform_get_drvdata(pdev);
> +
> +	dma_async_device_unregister(&m->dma_slave);
> +
> +	if (pdev->dev.of_node)
> +		of_dma_controller_free(pdev->dev.of_node);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id moxart_dma_match[] = {
> +	{ .compatible = "moxa,moxart-dma" },
> +	{ }
> +};
> +
> +static struct platform_driver moxart_driver = {
> +	.probe	= moxart_probe,
> +	.remove	= moxart_remove,
> +	.driver = {
> +		.name		= "moxart-dma-engine",
> +		.owner		= THIS_MODULE,
> +		.of_match_table	= moxart_dma_match,
> +	},
> +};
> +
> +static int moxart_init(void)
> +{
> +	return platform_driver_register(&moxart_driver);
> +}
> +subsys_initcall(moxart_init);
> +
> +static void __exit moxart_exit(void)
> +{
> +	platform_driver_unregister(&moxart_driver);
> +}
> +module_exit(moxart_exit);
> +
> +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
> +MODULE_DESCRIPTION("MOXART DMA engine driver");
> +MODULE_LICENSE("GPL v2");

-- 
Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Intel Finland Oy

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v14] dmaengine: Add MOXA ART DMA engine driver
  2013-12-11 15:13                         ` Jonas Jensen
@ 2013-12-12 12:32                           ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-12 12:32 UTC (permalink / raw)
  To: dmaengine
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, mark.rutland, andriy.shevchenko, lars, Jonas Jensen

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain; charset=y, Size: 35025 bytes --]

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies!
    
    This is now rebased on next-20131212 and uses the newly added
    dma_get_any_slave_channel().
    
    There's a problem/crash if DMA_PRIVATE isn't first added to
    capabilities (see attached boot message):
    
    Is DMA_PRIVATE mandatory or should the driver do something?
    
    From what I can tell, private_candidate() can return NULL for already
    allocated channels, or if it's a device without channels, or if
    __dma_device_satisfies_mask() returns false.
    
    The latter two can be eliminated because those errors do not print.
    
    I think dma_get_any_slave_channel() fails, but I don't know why it
    then crashes in SDHCI probe.
    
    Changes since v13:
    1.  don't use implied integer type specifiers
    2.  only count completed segments in moxart_dma_desc_size_in_flight()
    3.  start next descriptor when previous finish
    4.  remove redundant comments
    5.  reorder comment/define blocks
    6.  format text to fit 80 column width
    7.  comment style cleanup
    8.  split "unsigned int es, i" on two rows (moxart_prep_slave_sg())
    9.  print size_t using format "%zu" (moxart_dma_desc_size_in_flight())
    10. add {} to accompanying if (moxart_dma_interrupt())
    11. use NO_IRQ in irq_of_parse_and_map() return value check
    12. use dma_get_any_slave_channel() in moxart_of_xlate()
    13. remove moxart_dma_filter_fn()
    14. add DMA_PRIVATE to capabilities
    
    Applies to next-20131212
    
    Boot log:
    Uncompressing Linux... done, booting the kernel.
    [    0.000000] Booting Linux on physical CPU 0x0
    [    0.000000] Linux version 3.13.0-rc3-next-20131212+ (i@Ildjarn) (gcc version 4.6.3 (crosstool-NG 1.16.0) ) #1330 PREEMPT Thu Dec 12 12:05:54 CET 2013
    [    0.000000] CPU: FA526 [66015261] revision 1 (ARMv4), cr=0000397f
    [    0.000000] CPU: VIVT data cache, VIVT instruction cache
    [    0.000000] Machine model: MOXA UC-7112-LX
    [    0.000000] bootconsole [earlycon0] enabled
    [    0.000000] Memory policy: Data cache writeback
    [    0.000000] On node 0 totalpages: 8192
    [    0.000000] free_area_init_node: node 0, pgdat c0386234, node_mem_map c0948000
    [    0.000000]   Normal zone: 72 pages used for memmap
    [    0.000000]   Normal zone: 0 pages reserved
    [    0.000000]   Normal zone: 8192 pages, LIFO batch:0
    [    0.000000] pcpu-alloc: s0 r0 d32768 u32768 alloc=1*32768
    [    0.000000] pcpu-alloc: [0] 0
    [    0.000000] Built 1 zonelists in Zone order, mobility grouping on.  Total pages: 8120
    [    0.000000] Kernel command line: debug loglevel=9 console=ttyS0,115200n8 earlyprintk root=/dev/mmcblk0p1 rw rootwait
    [    0.000000] PID hash table entries: 128 (order: -3, 512 bytes)
    [    0.000000] Dentry cache hash table entries: 4096 (order: 2, 16384 bytes)
    [    0.000000] Inode-cache hash table entries: 2048 (order: 1, 8192 bytes)
    [    0.000000] Memory: 22892K/32768K available (2746K kernel code, 106K rwdata, 564K rodata, 156K init, 5884K bss, 9876K reserved)
    [    0.000000] Virtual kernel memory layout:
    [    0.000000]     vector  : 0xffff0000 - 0xffff1000   (   4 kB)
    [    0.000000]     fixmap  : 0xfff00000 - 0xfffe0000   ( 896 kB)
    [    0.000000]     vmalloc : 0xc2800000 - 0xff000000   ( 968 MB)
    [    0.000000]     lowmem  : 0xc0000000 - 0xc2000000   (  32 MB)
    [    0.000000]       .text : 0xc0008000 - 0xc0343a90   (3311 kB)
    [    0.000000]       .init : 0xc0344000 - 0xc036b358   ( 157 kB)
    [    0.000000]       .data : 0xc036c000 - 0xc0386a80   ( 107 kB)
    [    0.000000]        .bss : 0xc0386a8c - 0xc0945b98   (5885 kB)
    [    0.000000] SLUB: HWalign=32, Order=0-3, MinObjects=0, CPUs=1, Nodes=1
    [    0.000000] Preemptible hierarchical RCU implementation.
    [    0.000000] NR_IRQS:16 nr_irqs:16 16
    [    0.000000] sched_clock: 32 bits at 100 Hz, resolution 10000000ns, wraps every 21474836480000000ns
    [    0.000000] Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar
    [    0.000000] ... MAX_LOCKDEP_SUBCLASSES:  8
    [    0.000000] ... MAX_LOCK_DEPTH:          48
    [    0.000000] ... MAX_LOCKDEP_KEYS:        8191
    [    0.000000] ... CLASSHASH_SIZE:          4096
    [    0.000000] ... MAX_LOCKDEP_ENTRIES:     16384
    [    0.000000] ... MAX_LOCKDEP_CHAINS:      32768
    [    0.000000] ... CHAINHASH_SIZE:          16384
    [    0.000000]  memory used by lock dependency info: 3695 kB
    [    0.000000]  per task-struct memory footprint: 1152 bytes
    [    0.000000] kmemleak: Kernel memory leak detector disabled
    [    0.000000] ODEBUG: 0 of 0 active objects replaced
    [    0.000000] kmemleak: Early log buffer exceeded (673), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE
    [    0.140000] Calibrating delay loop... 143.76 BogoMIPS (lpj=718848)
    [    0.200000] pid_max: default: 4096 minimum: 301
    [    0.210000] Mount-cache hash table entries: 512
    [    0.300000] CPU: Testing write buffer coherency: ok
    [    0.320000] Setting up static identity map for 0x27ba60 - 0x27baa8
    [    0.390000] devtmpfs: initialized
    [    0.440000] kworker/u2:0 (13) used greatest stack depth: 6424 bytes left
    [    0.450000] NET: Registered protocol family 16
    [    0.480000] DMA: preallocated 256 KiB pool for atomic coherent allocations
    [    0.580000] kworker/u2:0 (21) used greatest stack depth: 6216 bytes left
    [    0.790000] kworker/u2:0 (46) used greatest stack depth: 6028 bytes left
    [    0.900000] kworker/u2:0 (60) used greatest stack depth: 5848 bytes left
    [    1.150000] bio: create slab <bio-0> at 0
    [    1.180000] moxart-dma-engine 90500080.dma: moxart_probe: chs[0]: ch->ch_num=0 ch->base=c2850080
    [    1.190000] moxart-dma-engine 90500080.dma: moxart_probe: chs[1]: ch->ch_num=1 ch->base=c2850090
    [    1.200000] moxart-dma-engine 90500080.dma: moxart_probe: chs[2]: ch->ch_num=2 ch->base=c28500a0
    [    1.210000] moxart-dma-engine 90500080.dma: moxart_probe: chs[3]: ch->ch_num=3 ch->base=c28500b0
    [    1.260000] moxart-dma-engine 90500080.dma: moxart_probe: IRQ=17
    [    1.550000] DMA-API: preallocated 4096 debug entries
    [    1.560000] DMA-API: debugging enabled by kernel config
    [    1.560000] Switched to clocksource moxart_timer
    [    1.680000] NET: Registered protocol family 2
    [    1.710000] TCP established hash table entries: 1024 (order: 0, 4096 bytes)
    [    1.720000] TCP bind hash table entries: 1024 (order: 3, 36864 bytes)
    [    1.730000] TCP: Hash tables configured (established 1024 bind 1024)
    [    1.750000] TCP: reno registered
    [    1.750000] UDP hash table entries: 128 (order: 1, 10240 bytes)
    [    1.760000] UDP-Lite hash table entries: 128 (order: 1, 10240 bytes)
    [    1.780000] NET: Registered protocol family 1
    [    2.550000] jffs2: version 2.2. (NAND) © 2001-2006 Red Hat, Inc.
    [    2.590000] msgmni has been set to 44
    [    2.600000] io scheduler noop registered
    [    2.610000] io scheduler cfq registered (default)
    [    2.630000] gpiochip_add: registered GPIOs 0 to 31 on device: moxart-gpio
    [    2.660000] Serial: 8250/16550 driver, 1 ports, IRQ sharing enabled
    [    2.750000] 98200000.uart: ttyS0 at MMIO 0x98200000 (irq = 21, base_baud = 921600) is a 16550A
    [    2.760000] console [ttyS0] enabled
    [    2.760000] console [ttyS0] enabled
    [    2.770000] bootconsole [earlycon0] disabled
    [    2.770000] bootconsole [earlycon0] disabled
    [    2.880000] 80000000.flash: Found 1 x16 devices at 0x0 in 16-bit bank. Manufacturer ID 0x000089 Chip ID 0x000018
    [    2.890000] Intel/Sharp Extended Query Table at 0x0031
    [    2.900000] Intel/Sharp Extended Query Table at 0x0031
    [    2.910000] Using buffer write method
    [    2.910000] cfi_cmdset_0001: Erase suspend on write enabled
    [    2.920000] erase region 0: offset=0x0,size=0x20000,blocks=128
    [    2.920000] 4 ofpart partitions found on MTD device 80000000.flash
    [    2.930000] Creating 4 MTD partitions on "80000000.flash":
    [    2.940000] 0x000000000000-0x000000040000 : "bootloader"
    [    3.020000] 0x000000040000-0x000000200000 : "linux kernel"
    [    3.090000] 0x000000200000-0x000000a00000 : "root filesystem"
    [    3.160000] 0x000000a00000-0x000001000000 : "user filesystem"
    [    3.910000] libphy: MOXA ART Ethernet MII: probed
    [    4.600000] libphy: MOXA ART Ethernet MII: probed
    [    4.680000] moxart-ethernet 90900000.mac eth0: moxart_mac_probe: IRQ=19 address=00:00:00:00:00:00
    [    4.690000] moxart-ethernet 90900000.mac eth0: generated random MAC address 72:53:d2:5f:0f:e8
    [    4.740000] moxart-ethernet 92000000.mac eth1: moxart_mac_probe: IRQ=20 address=00:00:00:00:00:00
    [    4.750000] moxart-ethernet 92000000.mac eth1: generated random MAC address b6:e4:b8:f4:02:8c
    [    4.770000] of_get_named_gpiod_flags exited with status 0
    [    4.790000] input: gpio_keys_polled.2 as /devices/gpio_keys_polled.2/input/input0
    [    4.820000] evbug: Connected device: input0 (gpio_keys_polled.2 at gpio-keys-polled/input0)
    [    4.840000] of_get_named_gpiod_flags exited with status 0
    [    4.850000] of_get_named_gpiod_flags exited with status 0
    [    4.860000] of_get_named_gpiod_flags exited with status 0
    [    4.890000] moxart-rtc rtc.0: rtc core: registered rtc.0 as rtc0
    [    4.930000] kworker/u2:0 (241) used greatest stack depth: 5840 bytes left
    [    4.970000] dma dma0chan0: moxart_alloc_chan_resources: allocating channel #0
    [    4.980000] Unable to handle kernel paging request at virtual address ffffffed
    [    4.990000] pgd = c0004000
    [    5.000000] [ffffffed] *pgd=01ffd831, *pte=00000000, *ppte=00000000
    [    5.000000] Internal error: Oops: 1 [#1] PREEMPT ARM
    [    5.000000] CPU: 0 PID: 1 Comm: swapper Not tainted 3.13.0-rc3-next-20131212+ #1330
    [    5.000000] task: c1834000 ti: c1838000 task.ti: c1838000
    [    5.000000] PC is at moxart_probe+0x248/0x344
    [    5.000000] LR is at moxart_probe+0x228/0x344
    [    5.000000] pc : [<c01d61e8>]    lr : [<c01d61c8>]    psr: 60000053
    [    5.000000] sp : c1839d68  ip : c18d0124  fp : c1839de4
    [    5.000000] r10: 00000012  r9 : c0379fd0  r8 : c1a04b60
    [    5.000000] r7 : c099165c  r6 : 00000000  r5 : c1870a10  r4 : c1a04800
    [    5.000000] r3 : 00000000  r2 : 00050348  r1 : 98e00040  r0 : ffffffed
    [    5.000000] Flags: nZCv  IRQs on  FIQs off  Mode SVC_32  ISA ARM  Segment kernel
    [    5.000000] Control: 0000397f  Table: 00004000  DAC: 00000017
    [    5.000000] Process swapper (pid: 1, stack limit = 0xc18381c0)
    [    5.000000] Stack: (0xc1839d68 to 0xc183a000)
    [    5.000000] 9d60:                   c03444b8 c036b2f4 c1839da4 c1839d80 00000002 98e00040
    [    5.000000] 9d80: 00000000 00000004 00000004 00000001 c03444b8 c1a095d8 c1839dcc 98e00000
    [    5.000000] 9da0: 98e0005b c0991698 00000200 00000000 00000000 00000000 c03808d4 c1870a10
    [    5.000000] 9dc0: c03808d4 c1870a10 00000000 c03808d4 c03444b8 c036b2f4 c1839dfc c1839de8
    [    5.000000] 9de0: c019d158 c01d5fb0 c0939698 c03808d4 c1839e24 c1839e00 c019bfb4 c019d148
    [    5.000000] 9e00: 00000000 c1870a10 c03808d4 c1870a44 00000000 c03554a4 c1839e44 c1839e28
    [    5.000000] 9e20: c019c1e0 c019bf40 00000002 c03808d4 c019c144 00000000 c1839e6c c1839e48
    [    5.000000] 9e40: c019a648 c019c154 c18036a8 c18697d0 c19e2558 c03808d4 c1a1c180 c037c738
    [    5.000000] 9e60: c1839e7c c1839e70 c019bd0c c019a5f4 c1839ea4 c1839e80 c019ae94 c019bcfc
    [    5.000000] 9e80: c02faa68 c1839e90 c03808d4 00000006 00000000 c0386aa0 c1839ebc c1839ea8
    [    5.000000] 9ea0: c019c614 c019adc4 c035cf64 00000006 c1839ecc c1839ec0 c019d97c c019c5a4
    [    5.000000] 9ec0: c1839edc c1839ed0 c03554bc c019d93c c1839f54 c1839ee0 c0344b34 c03554b4
    [    5.000000] 9ee0: c1839f0c c1839ef0 c1839f0c c1839ef8 c0344400 c03091f0 c099220b 00000036
    [    5.000000] 9f00: c1839f54 c1839f10 c002db18 c03444c8 c1839f34 00000006 00000006 c0308e90
    [    5.000000] 9f20: 00000000 c02f77f8 c1839f54 c035cf60 00000006 c035cf64 00000006 c035cf44
    [    5.000000] 9f40: c0386aa0 00000036 c1839f94 c1839f58 c0344cf0 c0344a94 00000006 00000006
    [    5.000000] 9f60: c03444b8 00000000 c00364d4 00000000 c0271f6c 00000000 00000000 00000000
    [    5.000000] 9f80: 00000000 00000000 c1839fac c1839f98 c0271f7c c0344c0c 00000000 00000000
    [    5.000000] 9fa0: 00000000 c1839fb0 c0009360 c0271f7c 00000000 00000000 00000000 00000000
    [    5.000000] 9fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
    [    5.000000] 9fe0: 00000000 00000000 00000000 00000000 00000013 00000000 00000000 00000000
    [    5.000000] [<c01d61e8>] (moxart_probe) from [<c019d158>] (platform_drv_probe+0x20/0x50)
    [    5.000000] [<c019d158>] (platform_drv_probe) from [<c019bfb4>] (driver_probe_device+0x84/0x214)
    [    5.000000] [<c019bfb4>] (driver_probe_device) from [<c019c1e0>] (__driver_attach+0x9c/0xa0)
    [    5.000000] [<c019c1e0>] (__driver_attach) from [<c019a648>] (bus_for_each_dev+0x64/0x94)
    [    5.000000] [<c019a648>] (bus_for_each_dev) from [<c019bd0c>] (driver_attach+0x20/0x28)
    [    5.000000] [<c019bd0c>] (driver_attach) from [<c019ae94>] (bus_add_driver+0xe0/0x1cc)
    [    5.000000] [<c019ae94>] (bus_add_driver) from [<c019c614>] (driver_register+0x80/0xfc)
    [    5.000000] [<c019c614>] (driver_register) from [<c019d97c>] (__platform_driver_register+0x50/0x64)
    [    5.000000] [<c019d97c>] (__platform_driver_register) from [<c03554bc>] (moxart_sdhci_driver_init+0x18/0x20)
    [    5.000000] [<c03554bc>] (moxart_sdhci_driver_init) from [<c0344b34>] (do_one_initcall+0xb0/0x178)
    [    5.000000] [<c0344b34>] (do_one_initcall) from [<c0344cf0>] (kernel_init_freeable+0xf4/0x1b4)
    [    5.000000] [<c0344cf0>] (kernel_init_freeable) from [<c0271f7c>] (kernel_init+0x10/0x118)
    [    5.000000] [<c0271f7c>] (kernel_init) from [<c0009360>] (ret_from_fork+0x14/0x34)
    [    5.000000] Code: e50b306c e3a03000 e50b1068 e50b3064 (e5903000)
    [    5.010000] ---[ end trace a7a79519eb6f6ed3 ]---
    [    5.020000] swapper (1) used greatest stack depth: 4996 bytes left
    [    5.020000] Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
    [    5.020000]
    [ 1203.680000] random: nonblocking pool is initialized

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+The second cell selects the request line the client uses; for example,
+the MMC controller uses request line 5:
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 132a4fd..ca4fa6b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -334,6 +334,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..cb3d07a
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+/* One hardware transfer segment: bus address and length in bytes. */
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+/*
+ * Software descriptor for one prepared slave transfer.
+ *
+ * @dma_dir:    DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
+ * @dev_addr:   fixed device-side bus address
+ * @sglen:      number of entries in @sg
+ * @dma_cycles: cycle count programmed for the segment in flight
+ * @vd:         virt-dma bookkeeping (cookie, list linkage)
+ * @es:         element size index (MOXART_DMA_DATA_TYPE_*)
+ * @sg:         trailing segment array; C99 flexible array member
+ *              replaces the old GNU zero-length array "sg[0]"
+ */
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[];
+};
+
+/* Per-channel state; one instance per hardware channel. */
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;		/* channel register block */
+	struct moxart_desc		*desc;		/* descriptor in flight, or NULL */
+
+	struct dma_slave_config		cfg;		/* cached slave configuration */
+
+	bool				allocated;	/* channel handed to a client */
+	bool				error;		/* error IRQ latched since last prep */
+	int				ch_num;		/* index within the controller */
+	unsigned int			line_reqno;	/* handshake request line (from DT cell) */
+	unsigned int			sgidx;		/* index of segment in flight */
+};
+
+/* Controller instance: the dmaengine device plus its four channels. */
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/*
+ * NOTE(review): nothing in this file references this struct any more
+ * (the v14 changelog removed moxart_dma_filter_fn); candidate for
+ * removal.
+ */
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Element size in bytes for each MOXART_DMA_DATA_TYPE_* index. */
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* struct device backing a dmaengine channel, for dev_dbg()/dev_err(). */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Downcast a generic dma_chan to the embedding moxart_chan. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* Downcast an async tx descriptor to the embedding moxart_desc. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt-dma desc_free callback; descriptors are plain kzalloc() memory. */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * Abort all activity on a channel: drop the descriptor in flight,
+ * disable the engine and its interrupts, then free every queued
+ * virt-dma descriptor.  Always returns 0.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	/* Unconditional: the previous "if (ch->desc)" guard was redundant. */
+	ch->desc = NULL;
+
+	/* Stop the engine and mask completion/error interrupts. */
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	/* Collect queued descriptors under the lock, free them outside. */
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * Cache the slave configuration and translate bus width, direction and
+ * request line into the channel control register.  Returns -EINVAL for
+ * unsupported bus widths.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	/* Burst mode on; clear increment and request-line fields first. */
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/*
+	 * Pick data width, and make the memory-side address the one that
+	 * increments: destination for DEV_TO_MEM, source for MEM_TO_DEV.
+	 * NOTE(review): both directions key off src_addr_width here;
+	 * dst_addr_width is never consulted — confirm that is intended.
+	 */
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		/* Width field 00 = word transfers. */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Route the handshake: the peripheral side sits on APB (select
+	 * bit clear), the memory side on AHB (select bit set), and the
+	 * request line number goes into the peripheral side's REQ_NO
+	 * field (bits 16-19 for dest, 24-27 for source).
+	 */
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point: dispatch the supported
+ * commands; pause/resume are not implemented by this hardware.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	if (cmd == DMA_TERMINATE_ALL) {
+		moxart_terminate_all(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_SLAVE_CONFIG)
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+
+	if (cmd == DMA_PAUSE || cmd == DMA_RESUME)
+		return -EINVAL;
+
+	return -ENOSYS;
+}
+
+/*
+ * Build a slave-transfer descriptor from a DMA-mapped scatterlist.
+ * Device-side address and bus width come from the cached slave config.
+ * Returns NULL on invalid direction, unsupported width, or allocation
+ * failure, per the dmaengine prep contract.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned int i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	/* Map the bus width onto the element-size index used by es_bytes[]. */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/* GFP_ATOMIC: prep callbacks may run in non-sleepable context. */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	/* A new transfer clears any previously latched error. */
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * OF translation callback (#dma-cells = <1>): grab any free channel
+ * and store the single specifier cell as its handshake request line.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+/* Mark the channel busy; no hardware setup is needed at allocation. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *mchan = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = true;
+
+	return 0;
+}
+
+/* Release virt-dma resources and mark the channel available again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *mchan = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&mchan->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, mchan->ch_num);
+	mchan->allocated = false;
+}
+
+/* Program source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src,
+				  dma_addr_t dst)
+{
+	writel(src, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/* Convert a segment length in bytes to hardware cycles and program it. */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	/*
+	 * NOTE(review): len is shifted by the element size in *bytes*
+	 * (1/2/4), not by its log2 — e.g. 64 bytes at word width gives
+	 * 64 >> 4 = 4 cycles, which matches the hardware note below.
+	 * Confirm against the APB DMA datasheet for the 1- and 2-byte
+	 * widths.
+	 */
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/* Load segment @idx of the current descriptor and start the hardware. */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = &d->sg[idx];
+
+	switch (d->dma_dir) {
+	case DMA_MEM_TO_DEV:
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+		break;
+	case DMA_DEV_TO_MEM:
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+		break;
+	default:
+		break;
+	}
+
+	moxart_set_transfer_params(ch, sg->len);
+	moxart_start_dma(ch);
+}
+
+/*
+ * Pop the next queued descriptor, if any, and start its first
+ * segment; otherwise mark the channel idle.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vdesc = vchan_next_desc(&ch->vc);
+
+	if (!vdesc) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vdesc->node);
+
+	ch->desc = to_moxart_dma_desc(&vdesc->tx);
+	ch->sgidx = 0;
+	moxart_dma_start_sg(ch, 0);
+}
+
+/* Start the queue if work was submitted and the channel is idle. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *mchan = to_moxart_dma_chan(chan);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&mchan->vc.lock, irqflags);
+	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&mchan->vc.lock, irqflags);
+}
+
+/* Total byte count of every segment in a descriptor. */
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	size_t total = 0;
+	unsigned int idx;
+
+	for (idx = 0; idx < d->sglen; idx++)
+		total += d->sg[idx].len;
+
+	return total;
+}
+
+/*
+ * Residue for the descriptor in flight: bytes of all segments up to
+ * and including the active one, minus what the completed hardware
+ * cycles have already moved.  Caller holds ch->vc.lock.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles, i;
+
+	for (size = i = 0; i <= ch->sgidx; i++)
+		size += ch->desc->sg[i].len;
+	/* REG_OFF_CYCLES presumably counts down as the transfer runs. */
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	/*
+	 * NOTE(review): cycles are converted back to bytes by shifting
+	 * with es_bytes[] (1/2/4), mirroring moxart_set_transfer_params();
+	 * confirm the datasheet's cycle/byte relation for narrow widths.
+	 */
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * dmaengine device_tx_status: report completion state for @cookie and,
+ * when the caller supplies a dma_tx_state, the residue in bytes.
+ * Returns DMA_ERROR if the channel latched a hardware error.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	/*
+	 * txstate may legally be NULL when the caller only wants the
+	 * status; the previous code dereferenced it unconditionally.
+	 */
+	if (txstate) {
+		spin_lock_irqsave(&ch->vc.lock, flags);
+		vd = vchan_find_desc(&ch->vc, cookie);
+		if (vd) {
+			/* Still queued: nothing transferred yet. */
+			d = to_moxart_dma_desc(&vd->tx);
+			txstate->residue = moxart_dma_desc_size(d);
+		} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+			/* In flight: subtract completed cycles. */
+			txstate->residue = moxart_dma_desc_size_in_flight(ch);
+		}
+		spin_unlock_irqrestore(&ch->vc.lock, flags);
+	}
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/*
+ * Fill in the dma_device callbacks and bind it to @dev.  Slave config
+ * and terminate-all are reached through the device_control hook.
+ */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	INIT_LIST_HEAD(&dma->channels);
+
+	dma->dev				= dev;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+}
+
+/*
+ * Shared interrupt handler for all channels of the controller.  For
+ * each allocated channel: on completion, either start the next
+ * scatterlist segment or complete the descriptor and start the next
+ * queued one; on error, latch ch->error for moxart_tx_status().
+ * The status bits are masked off the value written back at the end —
+ * presumably that is what acknowledges them in hardware; confirm the
+ * register's write semantics against the datasheet.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				/* More segments left, or descriptor done? */
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			/* Reported as DMA_ERROR by moxart_tx_status(). */
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the register window, initialize the four channels,
+ * request the shared IRQ and register with dmaengine and the OF DMA
+ * helper.  All resources are devm-managed.
+ *
+ * Fixes over the previous version:
+ *  - dma_base_addr was declared "static", wrongly sharing one per-device
+ *    mapping between instances;
+ *  - irq_of_parse_and_map() returns 0 on failure, not NO_IRQ.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Each channel owns a 16-byte register block within the window. */
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		/* Keep dmaengine registration consistent on failure. */
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/* Unregister from dmaengine and drop the OF DMA controller binding. */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *mdc = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&mdc->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+/* Export the match table so the module can be autoloaded from DT. */
+MODULE_DEVICE_TABLE(of, moxart_dma_match);
+
+/* Platform driver glue; devices match via DT "moxa,moxart-dma". */
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time rather than module_init —
+ * presumably so the DMA engine is available before client drivers
+ * (e.g. MMC) probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v14] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-12 12:32                           ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-12 12:32 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Thanks for the replies!
    
    This is now rebased on next-20131212 and uses the newly added
    dma_get_any_slave_channel().
    
    There's a problem/crash if DMA_PRIVATE isn't first added to
    capabilities (see attached boot message):
    
    Is DMA_PRIVATE mandatory or should the driver do something?
    
    From what I can tell, private_candidate() can return NULL for already
    allocated channels, or if it's a device without channels, or if
    __dma_device_satisfies_mask() returns false.
    
    The latter two can be eliminated because those errors do not print.
    
    I think dma_get_any_slave_channel() fails, but I don't know why it
    then crashes in SDHCI probe.
    
    Changes since v13:
    1.  don't use implied integer type specifiers
    2.  only count completed segments in moxart_dma_desc_size_in_flight()
    3.  start next descriptor when previous finish
    4.  remove redundant comments
    5.  reorder comment/define blocks
    6.  format text to fit 80 column width
    7.  comment style cleanup
    8.  split "unsigned int es, i" on two rows (moxart_prep_slave_sg())
    9.  print size_t using format "%zu" (moxart_dma_desc_size_in_flight())
    10. add {} to accompanying if (moxart_dma_interrupt())
    11. use NO_IRQ in irq_of_parse_and_map() return value check
    12. use dma_get_any_slave_channel() in moxart_of_xlate()
    13. remove moxart_dma_filter_fn()
    14. add DMA_PRIVATE to capabilities
    
    Applies to next-20131212
    
    Boot log:
    Uncompressing Linux... done, booting the kernel.
    [    0.000000] Booting Linux on physical CPU 0x0
    [    0.000000] Linux version 3.13.0-rc3-next-20131212+ (i at Ildjarn) (gcc version 4.6.3 (crosstool-NG 1.16.0) ) #1330 PREEMPT Thu Dec 12 12:05:54 CET 2013
    [    0.000000] CPU: FA526 [66015261] revision 1 (ARMv4), cr=0000397f
    [    0.000000] CPU: VIVT data cache, VIVT instruction cache
    [    0.000000] Machine model: MOXA UC-7112-LX
    [    0.000000] bootconsole [earlycon0] enabled
    [    0.000000] Memory policy: Data cache writeback
    [    0.000000] On node 0 totalpages: 8192
    [    0.000000] free_area_init_node: node 0, pgdat c0386234, node_mem_map c0948000
    [    0.000000]   Normal zone: 72 pages used for memmap
    [    0.000000]   Normal zone: 0 pages reserved
    [    0.000000]   Normal zone: 8192 pages, LIFO batch:0
    [    0.000000] pcpu-alloc: s0 r0 d32768 u32768 alloc=1*32768
    [    0.000000] pcpu-alloc: [0] 0
    [    0.000000] Built 1 zonelists in Zone order, mobility grouping on.  Total pages: 8120
    [    0.000000] Kernel command line: debug loglevel=9 console=ttyS0,115200n8 earlyprintk root=/dev/mmcblk0p1 rw rootwait
    [    0.000000] PID hash table entries: 128 (order: -3, 512 bytes)
    [    0.000000] Dentry cache hash table entries: 4096 (order: 2, 16384 bytes)
    [    0.000000] Inode-cache hash table entries: 2048 (order: 1, 8192 bytes)
    [    0.000000] Memory: 22892K/32768K available (2746K kernel code, 106K rwdata, 564K rodata, 156K init, 5884K bss, 9876K reserved)
    [    0.000000] Virtual kernel memory layout:
    [    0.000000]     vector  : 0xffff0000 - 0xffff1000   (   4 kB)
    [    0.000000]     fixmap  : 0xfff00000 - 0xfffe0000   ( 896 kB)
    [    0.000000]     vmalloc : 0xc2800000 - 0xff000000   ( 968 MB)
    [    0.000000]     lowmem  : 0xc0000000 - 0xc2000000   (  32 MB)
    [    0.000000]       .text : 0xc0008000 - 0xc0343a90   (3311 kB)
    [    0.000000]       .init : 0xc0344000 - 0xc036b358   ( 157 kB)
    [    0.000000]       .data : 0xc036c000 - 0xc0386a80   ( 107 kB)
    [    0.000000]        .bss : 0xc0386a8c - 0xc0945b98   (5885 kB)
    [    0.000000] SLUB: HWalign=32, Order=0-3, MinObjects=0, CPUs=1, Nodes=1
    [    0.000000] Preemptible hierarchical RCU implementation.
    [    0.000000] NR_IRQS:16 nr_irqs:16 16
    [    0.000000] sched_clock: 32 bits at 100 Hz, resolution 10000000ns, wraps every 21474836480000000ns
    [    0.000000] Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar
    [    0.000000] ... MAX_LOCKDEP_SUBCLASSES:  8
    [    0.000000] ... MAX_LOCK_DEPTH:          48
    [    0.000000] ... MAX_LOCKDEP_KEYS:        8191
    [    0.000000] ... CLASSHASH_SIZE:          4096
    [    0.000000] ... MAX_LOCKDEP_ENTRIES:     16384
    [    0.000000] ... MAX_LOCKDEP_CHAINS:      32768
    [    0.000000] ... CHAINHASH_SIZE:          16384
    [    0.000000]  memory used by lock dependency info: 3695 kB
    [    0.000000]  per task-struct memory footprint: 1152 bytes
    [    0.000000] kmemleak: Kernel memory leak detector disabled
    [    0.000000] ODEBUG: 0 of 0 active objects replaced
    [    0.000000] kmemleak: Early log buffer exceeded (673), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE
    [    0.140000] Calibrating delay loop... 143.76 BogoMIPS (lpj=718848)
    [    0.200000] pid_max: default: 4096 minimum: 301
    [    0.210000] Mount-cache hash table entries: 512
    [    0.300000] CPU: Testing write buffer coherency: ok
    [    0.320000] Setting up static identity map for 0x27ba60 - 0x27baa8
    [    0.390000] devtmpfs: initialized
    [    0.440000] kworker/u2:0 (13) used greatest stack depth: 6424 bytes left
    [    0.450000] NET: Registered protocol family 16
    [    0.480000] DMA: preallocated 256 KiB pool for atomic coherent allocations
    [    0.580000] kworker/u2:0 (21) used greatest stack depth: 6216 bytes left
    [    0.790000] kworker/u2:0 (46) used greatest stack depth: 6028 bytes left
    [    0.900000] kworker/u2:0 (60) used greatest stack depth: 5848 bytes left
    [    1.150000] bio: create slab <bio-0> at 0
    [    1.180000] moxart-dma-engine 90500080.dma: moxart_probe: chs[0]: ch->ch_num=0 ch->base=c2850080
    [    1.190000] moxart-dma-engine 90500080.dma: moxart_probe: chs[1]: ch->ch_num=1 ch->base=c2850090
    [    1.200000] moxart-dma-engine 90500080.dma: moxart_probe: chs[2]: ch->ch_num=2 ch->base=c28500a0
    [    1.210000] moxart-dma-engine 90500080.dma: moxart_probe: chs[3]: ch->ch_num=3 ch->base=c28500b0
    [    1.260000] moxart-dma-engine 90500080.dma: moxart_probe: IRQ=17
    [    1.550000] DMA-API: preallocated 4096 debug entries
    [    1.560000] DMA-API: debugging enabled by kernel config
    [    1.560000] Switched to clocksource moxart_timer
    [    1.680000] NET: Registered protocol family 2
    [    1.710000] TCP established hash table entries: 1024 (order: 0, 4096 bytes)
    [    1.720000] TCP bind hash table entries: 1024 (order: 3, 36864 bytes)
    [    1.730000] TCP: Hash tables configured (established 1024 bind 1024)
    [    1.750000] TCP: reno registered
    [    1.750000] UDP hash table entries: 128 (order: 1, 10240 bytes)
    [    1.760000] UDP-Lite hash table entries: 128 (order: 1, 10240 bytes)
    [    1.780000] NET: Registered protocol family 1
    [    2.550000] jffs2: version 2.2. (NAND) © 2001-2006 Red Hat, Inc.
    [    2.590000] msgmni has been set to 44
    [    2.600000] io scheduler noop registered
    [    2.610000] io scheduler cfq registered (default)
    [    2.630000] gpiochip_add: registered GPIOs 0 to 31 on device: moxart-gpio
    [    2.660000] Serial: 8250/16550 driver, 1 ports, IRQ sharing enabled
    [    2.750000] 98200000.uart: ttyS0 at MMIO 0x98200000 (irq = 21, base_baud = 921600) is a 16550A
    [    2.760000] console [ttyS0] enabled
    [    2.760000] console [ttyS0] enabled
    [    2.770000] bootconsole [earlycon0] disabled
    [    2.770000] bootconsole [earlycon0] disabled
    [    2.880000] 80000000.flash: Found 1 x16 devices at 0x0 in 16-bit bank. Manufacturer ID 0x000089 Chip ID 0x000018
    [    2.890000] Intel/Sharp Extended Query Table at 0x0031
    [    2.900000] Intel/Sharp Extended Query Table at 0x0031
    [    2.910000] Using buffer write method
    [    2.910000] cfi_cmdset_0001: Erase suspend on write enabled
    [    2.920000] erase region 0: offset=0x0,size=0x20000,blocks=128
    [    2.920000] 4 ofpart partitions found on MTD device 80000000.flash
    [    2.930000] Creating 4 MTD partitions on "80000000.flash":
    [    2.940000] 0x000000000000-0x000000040000 : "bootloader"
    [    3.020000] 0x000000040000-0x000000200000 : "linux kernel"
    [    3.090000] 0x000000200000-0x000000a00000 : "root filesystem"
    [    3.160000] 0x000000a00000-0x000001000000 : "user filesystem"
    [    3.910000] libphy: MOXA ART Ethernet MII: probed
    [    4.600000] libphy: MOXA ART Ethernet MII: probed
    [    4.680000] moxart-ethernet 90900000.mac eth0: moxart_mac_probe: IRQ=19 address=00:00:00:00:00:00
    [    4.690000] moxart-ethernet 90900000.mac eth0: generated random MAC address 72:53:d2:5f:0f:e8
    [    4.740000] moxart-ethernet 92000000.mac eth1: moxart_mac_probe: IRQ=20 address=00:00:00:00:00:00
    [    4.750000] moxart-ethernet 92000000.mac eth1: generated random MAC address b6:e4:b8:f4:02:8c
    [    4.770000] of_get_named_gpiod_flags exited with status 0
    [    4.790000] input: gpio_keys_polled.2 as /devices/gpio_keys_polled.2/input/input0
    [    4.820000] evbug: Connected device: input0 (gpio_keys_polled.2 at gpio-keys-polled/input0)
    [    4.840000] of_get_named_gpiod_flags exited with status 0
    [    4.850000] of_get_named_gpiod_flags exited with status 0
    [    4.860000] of_get_named_gpiod_flags exited with status 0
    [    4.890000] moxart-rtc rtc.0: rtc core: registered rtc.0 as rtc0
    [    4.930000] kworker/u2:0 (241) used greatest stack depth: 5840 bytes left
    [    4.970000] dma dma0chan0: moxart_alloc_chan_resources: allocating channel #0
    [    4.980000] Unable to handle kernel paging request at virtual address ffffffed
    [    4.990000] pgd = c0004000
    [    5.000000] [ffffffed] *pgd=01ffd831, *pte=00000000, *ppte=00000000
    [    5.000000] Internal error: Oops: 1 [#1] PREEMPT ARM
    [    5.000000] CPU: 0 PID: 1 Comm: swapper Not tainted 3.13.0-rc3-next-20131212+ #1330
    [    5.000000] task: c1834000 ti: c1838000 task.ti: c1838000
    [    5.000000] PC is at moxart_probe+0x248/0x344
    [    5.000000] LR is at moxart_probe+0x228/0x344
    [    5.000000] pc : [<c01d61e8>]    lr : [<c01d61c8>]    psr: 60000053
    [    5.000000] sp : c1839d68  ip : c18d0124  fp : c1839de4
    [    5.000000] r10: 00000012  r9 : c0379fd0  r8 : c1a04b60
    [    5.000000] r7 : c099165c  r6 : 00000000  r5 : c1870a10  r4 : c1a04800
    [    5.000000] r3 : 00000000  r2 : 00050348  r1 : 98e00040  r0 : ffffffed
    [    5.000000] Flags: nZCv  IRQs on  FIQs off  Mode SVC_32  ISA ARM  Segment kernel
    [    5.000000] Control: 0000397f  Table: 00004000  DAC: 00000017
    [    5.000000] Process swapper (pid: 1, stack limit = 0xc18381c0)
    [    5.000000] Stack: (0xc1839d68 to 0xc183a000)
    [    5.000000] 9d60:                   c03444b8 c036b2f4 c1839da4 c1839d80 00000002 98e00040
    [    5.000000] 9d80: 00000000 00000004 00000004 00000001 c03444b8 c1a095d8 c1839dcc 98e00000
    [    5.000000] 9da0: 98e0005b c0991698 00000200 00000000 00000000 00000000 c03808d4 c1870a10
    [    5.000000] 9dc0: c03808d4 c1870a10 00000000 c03808d4 c03444b8 c036b2f4 c1839dfc c1839de8
    [    5.000000] 9de0: c019d158 c01d5fb0 c0939698 c03808d4 c1839e24 c1839e00 c019bfb4 c019d148
    [    5.000000] 9e00: 00000000 c1870a10 c03808d4 c1870a44 00000000 c03554a4 c1839e44 c1839e28
    [    5.000000] 9e20: c019c1e0 c019bf40 00000002 c03808d4 c019c144 00000000 c1839e6c c1839e48
    [    5.000000] 9e40: c019a648 c019c154 c18036a8 c18697d0 c19e2558 c03808d4 c1a1c180 c037c738
    [    5.000000] 9e60: c1839e7c c1839e70 c019bd0c c019a5f4 c1839ea4 c1839e80 c019ae94 c019bcfc
    [    5.000000] 9e80: c02faa68 c1839e90 c03808d4 00000006 00000000 c0386aa0 c1839ebc c1839ea8
    [    5.000000] 9ea0: c019c614 c019adc4 c035cf64 00000006 c1839ecc c1839ec0 c019d97c c019c5a4
    [    5.000000] 9ec0: c1839edc c1839ed0 c03554bc c019d93c c1839f54 c1839ee0 c0344b34 c03554b4
    [    5.000000] 9ee0: c1839f0c c1839ef0 c1839f0c c1839ef8 c0344400 c03091f0 c099220b 00000036
    [    5.000000] 9f00: c1839f54 c1839f10 c002db18 c03444c8 c1839f34 00000006 00000006 c0308e90
    [    5.000000] 9f20: 00000000 c02f77f8 c1839f54 c035cf60 00000006 c035cf64 00000006 c035cf44
    [    5.000000] 9f40: c0386aa0 00000036 c1839f94 c1839f58 c0344cf0 c0344a94 00000006 00000006
    [    5.000000] 9f60: c03444b8 00000000 c00364d4 00000000 c0271f6c 00000000 00000000 00000000
    [    5.000000] 9f80: 00000000 00000000 c1839fac c1839f98 c0271f7c c0344c0c 00000000 00000000
    [    5.000000] 9fa0: 00000000 c1839fb0 c0009360 c0271f7c 00000000 00000000 00000000 00000000
    [    5.000000] 9fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
    [    5.000000] 9fe0: 00000000 00000000 00000000 00000000 00000013 00000000 00000000 00000000
    [    5.000000] [<c01d61e8>] (moxart_probe) from [<c019d158>] (platform_drv_probe+0x20/0x50)
    [    5.000000] [<c019d158>] (platform_drv_probe) from [<c019bfb4>] (driver_probe_device+0x84/0x214)
    [    5.000000] [<c019bfb4>] (driver_probe_device) from [<c019c1e0>] (__driver_attach+0x9c/0xa0)
    [    5.000000] [<c019c1e0>] (__driver_attach) from [<c019a648>] (bus_for_each_dev+0x64/0x94)
    [    5.000000] [<c019a648>] (bus_for_each_dev) from [<c019bd0c>] (driver_attach+0x20/0x28)
    [    5.000000] [<c019bd0c>] (driver_attach) from [<c019ae94>] (bus_add_driver+0xe0/0x1cc)
    [    5.000000] [<c019ae94>] (bus_add_driver) from [<c019c614>] (driver_register+0x80/0xfc)
    [    5.000000] [<c019c614>] (driver_register) from [<c019d97c>] (__platform_driver_register+0x50/0x64)
    [    5.000000] [<c019d97c>] (__platform_driver_register) from [<c03554bc>] (moxart_sdhci_driver_init+0x18/0x20)
    [    5.000000] [<c03554bc>] (moxart_sdhci_driver_init) from [<c0344b34>] (do_one_initcall+0xb0/0x178)
    [    5.000000] [<c0344b34>] (do_one_initcall) from [<c0344cf0>] (kernel_init_freeable+0xf4/0x1b4)
    [    5.000000] [<c0344cf0>] (kernel_init_freeable) from [<c0271f7c>] (kernel_init+0x10/0x118)
    [    5.000000] [<c0271f7c>] (kernel_init) from [<c0009360>] (ret_from_fork+0x14/0x34)
    [    5.000000] Code: e50b306c e3a03000 e50b1068 e50b3064 (e5903000)
    [    5.010000] ---[ end trace a7a79519eb6f6ed3 ]---
    [    5.020000] swapper (1) used greatest stack depth: 4996 bytes left
    [    5.020000] Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
    [    5.020000]
    [ 1203.680000] random: nonblocking pool is initialized

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 132a4fd..ca4fa6b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -334,6 +334,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..cb3d07a
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+/* One hardware scatter/gather segment: bus address and length in bytes. */
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+/*
+ * Software transfer descriptor, embedding a virt-dma descriptor.
+ * sg[] holds sglen segments; allocated in moxart_prep_slave_sg() as
+ * sizeof(*d) + sg_len * sizeof(d->sg[0]).
+ * NOTE(review): C99 flexible array member sg[] is preferred over sg[0].
+ */
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+/*
+ * Per-channel state.  base points at this channel's register window
+ * (REG_OFF_CHAN_SIZE bytes per channel); desc is the in-flight
+ * descriptor, sgidx the segment currently being transferred.
+ */
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+/* Controller instance: dmaengine device plus its fixed four channels. */
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Element size in bytes for each MOXART_DMA_DATA_TYPE_* value. */
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* Return the struct device backing a dmaengine channel, for dev_dbg() etc. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* container_of helper: dmaengine channel -> driver channel state. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* container_of helper: async tx descriptor -> driver descriptor. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt-dma desc_free callback: descriptors are plain kzalloc allocations. */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * Abort all activity on the channel: free the in-flight descriptor,
+ * disable the channel and mask its interrupts, then free every queued
+ * virt-dma descriptor outside the lock.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	/*
+	 * The in-flight descriptor was removed from the virt-dma lists by
+	 * moxart_dma_start_desc(), so vchan_get_all_descriptors() below
+	 * will not see it: free it explicitly here or it leaks.
+	 */
+	if (ch->desc) {
+		moxart_dma_desc_free(&ch->desc->vd);
+		ch->desc = NULL;
+	}
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * Apply a dma_slave_config to the channel control register: burst mode,
+ * data width, address increment for the memory side, APB/AHB select for
+ * each end, and the hardware handshake request line number.
+ *
+ * NOTE(review): only src_addr_width is consulted for the width even for
+ * MEM_TO_DEV transfers -- confirm clients set it for both directions.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/* The memory side gets the address increment matching the width. */
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		/* Source AHB (memory), dest APB (device); shift 16 is the
+		 * destination request-number field. */
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		/* Source APB (device), dest AHB (memory); shift 24 is the
+		 * source request-number field. */
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control hook: dispatch terminate/config commands.
+ * PAUSE and RESUME are not supported by this driver.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		/* arg carries a struct dma_slave_config * per the dmaengine API. */
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * Build a slave scatter/gather descriptor.
+ *
+ * Validates the direction and the configured bus width, copies each sg
+ * entry's DMA address and length into the descriptor, and hands it to
+ * the virt-dma layer.  Returns NULL on invalid parameters or OOM.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned int i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	/* The fixed device-side address/width come from the slave config. */
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/* Header plus sg_len trailing segments (sg[0] flexible array). */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	/* Clear any error latched by a previous transfer. */
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * #dma-cells translation: grab any free channel and record the request
+ * line number from the one-cell specifier (consumed later by
+ * moxart_slave_config()).
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+/*
+ * Mark the channel as in use; the interrupt handler only services
+ * channels with allocated set.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/* Release virt-dma resources and mark the channel free again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/* Program the source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	void __iomem *regs = ch->base;
+
+	writel(src_addr, regs + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, regs + REG_OFF_ADDRESS_DEST);
+}
+
+/*
+ * Program the CYCLES register for the current segment.
+ *
+ * NOTE(review): es_bytes[] holds byte counts (1/2/4) but is used here as
+ * a shift amount (len >> es_bytes[es]); for S8 this yields len/2, not
+ * len/1.  The in-flight residue calculation uses the inverse shift, so
+ * the two are at least internally consistent -- confirm against the
+ * APB DMA datasheet.
+ */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/* Program addresses and cycle count for segment idx, then start it. */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	/* The device address is fixed; the memory side walks the sg list. */
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * Take the next issued descriptor and start its first segment.
+ *
+ * Caller must hold ch->vc.lock (both call sites, moxart_issue_pending()
+ * and the interrupt handler, do).  The descriptor is unlinked from the
+ * virt-dma list while in flight; it is completed via
+ * vchan_cookie_complete() in the interrupt handler.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/* Kick the channel if it is idle and work was moved to the issued list. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/* Total payload of a descriptor: the sum of all segment lengths. */
+static size_t moxart_dma_desc_size(struct moxart_desc *d)
+{
+	size_t total = 0;
+	unsigned int idx;
+
+	for (idx = 0; idx < d->sglen; idx++)
+		total += d->sg[idx].len;
+
+	return total;
+}
+
+/*
+ * Residue of the in-flight descriptor: bytes not yet transferred.
+ *
+ * Sum the lengths of the segments that have NOT completed yet (from the
+ * current index ch->sgidx to the end of the list), then subtract the
+ * part of the current segment the controller has already copied,
+ * derived from the CYCLES register which holds remaining cycles.
+ * The previous code summed the completed segments instead, which is the
+ * opposite of what residue means.
+ *
+ * Must be called with ch->vc.lock held and ch->desc non-NULL.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	struct moxart_desc *d = ch->desc;
+	size_t size;
+	unsigned int completed_cycles, cycles, i;
+
+	/* Count only the segments still pending, including the current one. */
+	for (size = 0, i = ch->sgidx; i < d->sglen; i++)
+		size += d->sg[i].len;
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (d->dma_cycles - cycles);
+	/* NOTE(review): es_bytes[] values (1/2/4) are used as shift amounts,
+	 * mirroring the len >> es_bytes[es] in moxart_set_transfer_params(). */
+	size -= completed_cycles << es_bytes[d->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * Report transfer status and residue for a cookie.
+ *
+ * A descriptor still on the queue reports its full size as residue; the
+ * in-flight descriptor's residue is computed from the CYCLES register.
+ * NOTE(review): the dmaengine API allows txstate == NULL; the
+ * unconditional txstate->residue writes below would then oops --
+ * confirm whether callers guarantee a non-NULL state here.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	/* A latched hardware error overrides the cookie status. */
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/* Fill in the dmaengine callbacks and owner device for this controller. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Single interrupt shared by all four channels.
+ *
+ * For each allocated channel: on a finish interrupt, either start the
+ * next sg segment or complete the descriptor and start the next one;
+ * on an error interrupt, latch the error for moxart_tx_status().
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		/* NOTE(review): writing ctrl back with the status bits
+		 * cleared appears to ack the interrupt -- confirm against
+		 * the APB DMA datasheet. */
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the controller registers, initialize the four channels,
+ * request the shared interrupt, then register with the dmaengine core
+ * and the OF DMA translation helper.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	void __iomem *dma_base_addr;	/* was wrongly function-static */
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/* irq_of_parse_and_map() returns 0 on failure, not NO_IRQ. */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register windows are REG_OFF_CHAN_SIZE bytes apart. */
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/* Remove: unwind registration in reverse order of moxart_probe(). */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+/* Export the OF match table so the module can be autoloaded. */
+MODULE_DEVICE_TABLE(of, moxart_dma_match);
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time (earlier than module_init),
+ * presumably so the controller is available before client drivers
+ * probe -- TODO confirm the ordering requirement.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v14] dmaengine: Add MOXA ART DMA engine driver
  2013-12-12 12:32                           ` Jonas Jensen
@ 2013-12-13 16:02                             ` Lars-Peter Clausen
  -1 siblings, 0 replies; 80+ messages in thread
From: Lars-Peter Clausen @ 2013-12-13 16:02 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: dmaengine, linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw,
	arnd, linux, mark.rutland, andriy.shevchenko

On 12/12/2013 01:32 PM, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
[...]
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles, i;
> +
> +	for (size = i = 0; i <= ch->sgidx; i++)
> +		size += ch->desc->sg[i].len;

This still does not look right. The residue is the amount of data that still
needs to be transferred, so you need to count the segments that have not
been completed yet. Just like I wrote in the last mail, loop from ch->sgidx
to ch->desc->sglen.

> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
> +
> +	return size;
> +}
[...]

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v14] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-13 16:02                             ` Lars-Peter Clausen
  0 siblings, 0 replies; 80+ messages in thread
From: Lars-Peter Clausen @ 2013-12-13 16:02 UTC (permalink / raw)
  To: linux-arm-kernel

On 12/12/2013 01:32 PM, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---
[...]
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles, i;
> +
> +	for (size = i = 0; i <= ch->sgidx; i++)
> +		size += ch->desc->sg[i].len;

This still does not look right. The residue is the amount of data that still
needs to be transferred, so you need to count the segments that have not
been completed yet. Just like I wrote in the last mail, loop from ch->sgidx
to ch->desc->sglen.

> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
> +
> +	return size;
> +}
[...]

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v15] dmaengine: Add MOXA ART DMA engine driver
  2013-12-12 12:32                           ` Jonas Jensen
@ 2013-12-16 10:24                             ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-16 10:24 UTC (permalink / raw)
  To: dmaengine
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, mark.rutland, andriy.shevchenko, lars, Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v14:
    1. add "completed_sgs" parameter to moxart_dma_desc_size()
    2. use [1], corrected the in flight residue calculation
    
    Applies to next-20131216

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ac58b08..8f50f4d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -336,6 +336,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
/* One scatter/gather segment as handed to the controller. */
struct moxart_sg {
	dma_addr_t addr;	/* bus address of the segment */
	uint32_t len;		/* segment length in bytes */
};

/* Software descriptor: the result of one prep_slave_sg() call. */
struct moxart_desc {
	enum dma_transfer_direction	dma_dir;	/* DMA_MEM_TO_DEV or DMA_DEV_TO_MEM */
	dma_addr_t			dev_addr;	/* fixed device-side address */
	unsigned int			sglen;		/* number of entries in sg[] */
	unsigned int			dma_cycles;	/* cycles programmed for the current segment */
	struct virt_dma_desc		vd;		/* virt-dma bookkeeping (embeds the tx desc) */
	uint8_t				es;		/* element size code, MOXART_DMA_DATA_TYPE_* */
	struct moxart_sg		sg[0];		/* trailing variable-length segment array */
};

/* Per-channel state. */
struct moxart_chan {
	struct virt_dma_chan		vc;

	void __iomem			*base;		/* this channel's register window */
	struct moxart_desc		*desc;		/* descriptor on the hardware, NULL if idle */

	struct dma_slave_config		cfg;		/* cached DMA_SLAVE_CONFIG from the client */

	bool				allocated;	/* channel handed out to a client */
	bool				error;		/* error interrupt seen for current transfer */
	int				ch_num;		/* index within the controller (0..3) */
	unsigned int			line_reqno;	/* hardware handshake request line */
	unsigned int			sgidx;		/* index of the segment in flight */
};

/* The controller: APB_DMA_MAX_CHANNEL channels behind a single interrupt. */
struct moxart_dmadev {
	struct dma_device		dma_slave;
	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
};

/* NOTE(review): not referenced anywhere in this file. */
struct moxart_filter_data {
	struct moxart_dmadev		*mdc;
	struct of_phandle_args		*dma_spec;
};

/* Bytes per element for each MOXART_DMA_DATA_TYPE_* code. */
static const unsigned int es_bytes[] = {
	[MOXART_DMA_DATA_TYPE_S8] = 1,
	[MOXART_DMA_DATA_TYPE_S16] = 2,
	[MOXART_DMA_DATA_TYPE_S32] = 4,
};
+
/* Device used for dev_dbg()/dev_err() messages on behalf of @chan. */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
+
/* Convert a generic dma_chan back to its containing moxart_chan. */
static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct moxart_chan, vc.chan);
}
+
/* Convert a tx descriptor back to its containing moxart_desc. */
static inline struct moxart_desc *to_moxart_dma_desc(
	struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct moxart_desc, vd.tx);
}
+
/* virt-dma desc_free callback: a descriptor is a single kzalloc'd block. */
static void moxart_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct moxart_desc, vd));
}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
/*
 * dmaengine device_prep_slave_sg hook: build a software descriptor for a
 * scatter/gather slave transfer.
 *
 * The device-side address and bus width come from the cached
 * DMA_SLAVE_CONFIG (ch->cfg) according to @dir; each scatterlist entry is
 * copied into the descriptor's trailing sg[] array. The hardware is only
 * touched later, when the descriptor is issued.
 *
 * Returns the prepared tx descriptor, or NULL on invalid direction,
 * unsupported bus width or allocation failure.
 */
static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct moxart_desc *d;
	enum dma_slave_buswidth dev_width;
	dma_addr_t dev_addr;
	struct scatterlist *sgent;
	unsigned int es;
	unsigned int i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
			__func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = ch->cfg.src_addr;
		dev_width = ch->cfg.src_addr_width;
	} else {
		dev_addr = ch->cfg.dst_addr;
		dev_width = ch->cfg.dst_addr_width;
	}

	/* Map the bus width onto the element-size code used by es_bytes[]. */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = MOXART_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = MOXART_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = MOXART_DMA_DATA_TYPE_S32;
		break;
	default:
		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
			__func__, dev_width);
		return NULL;
	}

	/* GFP_ATOMIC: prep callbacks may be called from atomic context. */
	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dma_dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	for_each_sg(sgl, sgent, sg_len, i) {
		d->sg[i].addr = sg_dma_address(sgent);
		d->sg[i].len = sg_dma_len(sgent);
	}

	d->sglen = sg_len;

	ch->error = 0;

	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
/* Latch the source and destination bus addresses for the next transfer. */
static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
				  dma_addr_t dst_addr)
{
	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
}
+
/*
 * Program the cycle-count register for a segment of @len bytes.
 *
 * NOTE(review): @len is shifted right by es_bytes[es] (1, 2 or 4), i.e.
 * divided by 2, 4 or 16 rather than by the element size itself. The
 * comment below suggests burst mode moves several elements per cycle,
 * but the name "sglen_div" reads like a divisor while it is used as a
 * shift count — confirm against the controller documentation.
 */
static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
{
	struct moxart_desc *d = ch->desc;
	unsigned int sglen_div = es_bytes[d->es];

	d->dma_cycles = len >> sglen_div;

	/*
	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
	 * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ).
	 */
	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);

	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
		__func__, d->dma_cycles, len);
}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
/*
 * dmaengine device_issue_pending hook: move submitted descriptors to the
 * issued list and, if the channel is idle, start the first one.
 */
static void moxart_issue_pending(struct dma_chan *chan)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&ch->vc.lock, flags);
	if (vchan_issue_pending(&ch->vc) && !ch->desc)
		moxart_dma_start_desc(chan);
	spin_unlock_irqrestore(&ch->vc.lock, flags);
}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+				   unsigned int completed_sgs)
+{
+	unsigned int i;
+	size_t size;
+
+	for (size = i = completed_sgs; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
/*
 * Residue of the descriptor currently on the hardware: the bytes of the
 * not-yet-started segments plus the remainder of the current one, derived
 * from the down-counting CYCLES register. Caller holds ch->vc.lock.
 *
 * NOTE(review): completed cycles are converted to bytes with
 * "<< es_bytes[es]" — a shift by the byte count, mirroring the shift in
 * moxart_set_transfer_params(); verify both against the hardware manual.
 */
static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
{
	size_t size;
	unsigned int completed_cycles, cycles;

	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
	cycles = readl(ch->base + REG_OFF_CYCLES);
	completed_cycles = (ch->desc->dma_cycles - cycles);
	size -= completed_cycles << es_bytes[ch->desc->es];

	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);

	return size;
}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d, 0);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
/*
 * Shared interrupt for all four channels: scan every allocated channel,
 * advance or complete its descriptor on a "finished" interrupt, latch
 * errors, then write the control register back with the handled status
 * bits cleared.
 */
static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
{
	struct moxart_dmadev *mc = devid;
	struct moxart_chan *ch = &mc->slave_chans[0];
	unsigned int i;
	unsigned long flags;
	u32 ctrl;

	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);

	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
		if (!ch->allocated)
			continue;

		ctrl = readl(ch->base + REG_OFF_CTRL);

		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
			__func__, ch, ch->base, ctrl);

		if (ctrl & APB_DMA_FIN_INT_STS) {
			/* NOTE(review): the status bit is presumably cleared
			 * by writing it back as 0 (writel below) — confirm
			 * W0C vs W1C semantics with the hardware manual. */
			ctrl &= ~APB_DMA_FIN_INT_STS;
			if (ch->desc) {
				spin_lock_irqsave(&ch->vc.lock, flags);
				/* More segments left: start the next one;
				 * otherwise complete this descriptor and
				 * start the next queued one (if any). */
				if (++ch->sgidx < ch->desc->sglen) {
					moxart_dma_start_sg(ch, ch->sgidx);
				} else {
					vchan_cookie_complete(&ch->desc->vd);
					moxart_dma_start_desc(&ch->vc.chan);
				}
				spin_unlock_irqrestore(&ch->vc.lock, flags);
			}
		}

		if (ctrl & APB_DMA_ERR_INT_STS) {
			ctrl &= ~APB_DMA_ERR_INT_STS;
			/* Reported to clients via moxart_tx_status(). */
			ch->error = 1;
		}

		writel(ctrl, ch->base + REG_OFF_CTRL);
	}

	return IRQ_HANDLED;
}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
/* Bound via the device tree "moxa,moxart-dma" compatible. */
static struct platform_driver moxart_driver = {
	.probe	= moxart_probe,
	.remove	= moxart_remove,
	.driver = {
		.name		= "moxart-dma-engine",
		.owner		= THIS_MODULE,
		.of_match_table	= moxart_dma_match,
	},
};
+
/*
 * Registered at subsys_initcall time (not module_init) so the DMA engine
 * is available before client drivers such as MMC probe.
 */
static int moxart_init(void)
{
	return platform_driver_register(&moxart_driver);
}
subsys_initcall(moxart_init);
+
/* Module unload: undo the platform_driver_register() from moxart_init(). */
static void __exit moxart_exit(void)
{
	platform_driver_unregister(&moxart_driver);
}
module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v15] dmaengine: Add MOXA ART DMA engine driver
@ 2013-12-16 10:24                             ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2013-12-16 10:24 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v14:
    1. add "completed_sgs" parameter to moxart_dma_desc_size()
    2. use [1], corrected the in flight residue calculation
    
    Applies to next-20131216

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ac58b08..8f50f4d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -336,6 +336,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..551bfcd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	uint8_t				es;
+	struct moxart_sg		sg[0];
+};
+
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	unsigned int			line_reqno;
+	unsigned int			sgidx;
+};
+
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
/* Device used for dev_dbg()/dev_err() messages on behalf of @chan. */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
+
/* Convert a generic dma_chan back to its containing moxart_chan. */
static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct moxart_chan, vc.chan);
}
+
/* Convert a tx descriptor back to its containing moxart_desc. */
static inline struct moxart_desc *to_moxart_dma_desc(
	struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct moxart_desc, vd.tx);
}
+
/* virt-dma desc_free callback: a descriptor is a single kzalloc'd block. */
static void moxart_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct moxart_desc, vd));
}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
/*
 * dmaengine device_prep_slave_sg hook: build a software descriptor for a
 * scatter/gather slave transfer.
 *
 * The device-side address and bus width come from the cached
 * DMA_SLAVE_CONFIG (ch->cfg) according to @dir; each scatterlist entry is
 * copied into the descriptor's trailing sg[] array. The hardware is only
 * touched later, when the descriptor is issued.
 *
 * Returns the prepared tx descriptor, or NULL on invalid direction,
 * unsupported bus width or allocation failure.
 */
static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct moxart_chan *ch = to_moxart_dma_chan(chan);
	struct moxart_desc *d;
	enum dma_slave_buswidth dev_width;
	dma_addr_t dev_addr;
	struct scatterlist *sgent;
	unsigned int es;
	unsigned int i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
			__func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = ch->cfg.src_addr;
		dev_width = ch->cfg.src_addr_width;
	} else {
		dev_addr = ch->cfg.dst_addr;
		dev_width = ch->cfg.dst_addr_width;
	}

	/* Map the bus width onto the element-size code used by es_bytes[]. */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = MOXART_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = MOXART_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = MOXART_DMA_DATA_TYPE_S32;
		break;
	default:
		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
			__func__, dev_width);
		return NULL;
	}

	/* GFP_ATOMIC: prep callbacks may be called from atomic context. */
	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dma_dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	for_each_sg(sgl, sgent, sg_len, i) {
		d->sg[i].addr = sg_dma_address(sgent);
		d->sg[i].len = sg_dma_len(sgent);
	}

	d->sglen = sg_len;

	ch->error = 0;

	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
}
+
+/*
+ * moxart_of_xlate - devicetree dma-spec translation: hand out any free
+ * channel and record the client's request line number (cell 0) on it.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+/*
+ * Mark the channel in use; the shared IRQ handler only services
+ * channels with ->allocated set.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/* Release virt-dma resources and mark the channel idle for the IRQ loop. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/* Program the source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/*
+ * moxart_set_transfer_params - program the cycle count for @len bytes.
+ *
+ * NOTE(review): es_bytes[] holds element sizes in bytes (1/2/4) but is
+ * used here as a shift amount; only the 4-byte case matches the burst
+ * increments documented above - confirm against the APB DMA datasheet.
+ */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+/* Enable the channel together with its completion and error interrupts. */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/*
+ * Kick off one scatterlist entry: the device keeps its fixed address
+ * while the memory side takes the sg entry's address and length.
+ */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * Take the next issued descriptor off the virt-dma list and start its
+ * first sg entry; ch->desc is cleared when nothing is queued.
+ * Called with ch->vc.lock held.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	/* Detach from the issued list; completion is signalled from the IRQ. */
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/* Move submitted descriptors to the issued list; start the channel if idle. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/*
+ * moxart_dma_desc_size - residue in bytes of a descriptor, counting
+ * only the scatterlist entries from @completed_sgs onward.
+ */
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+				   unsigned int completed_sgs)
+{
+	unsigned int i;
+	size_t size;
+
+	/*
+	 * The accumulator must start at zero: the previous code seeded it
+	 * with the index value "completed_sgs" (size = i = completed_sgs),
+	 * inflating the reported residue by that many bytes.
+	 */
+	for (size = 0, i = completed_sgs; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+/*
+ * Residue of the active descriptor: the not-yet-started sg entries plus
+ * whatever the hardware still has to move in the current one.
+ *
+ * NOTE(review): assumes REG_OFF_CYCLES counts down while the transfer
+ * runs, and reuses es_bytes[] as a shift amount exactly like
+ * moxart_set_transfer_params() - confirm both against the datasheet.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * moxart_tx_status - report cookie state plus a residue estimate: a
+ * still-queued descriptor counts in full, the in-flight one is measured
+ * against the hardware cycle counter. A flagged channel error overrides
+ * the cookie status with DMA_ERROR.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		/* Still queued: the whole descriptor remains. */
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d, 0);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		/* In flight: ask the hardware how much is left. */
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/* Populate the dma_device callback table shared by all channels. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * moxart_dma_interrupt - shared handler for all four channels.
+ *
+ * On completion, advance to the next sg entry, or complete the
+ * descriptor and start the next queued one. On error, only flag the
+ * channel; the error is reported later through moxart_tx_status().
+ * Handled status bits are cleared in the value written back to CTRL.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		/* Only channels handed out to clients are serviced. */
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * moxart_probe - map the controller, initialize the four channels,
+ * register with dmaengine and the OF DMA helpers, and install the
+ * shared interrupt handler.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/*
+	 * Plain automatic variable: this was previously declared "static",
+	 * which would share one mapping across probe calls and break if
+	 * more than one controller instance were ever probed.
+	 */
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * irq_of_parse_and_map() returns 0 on failure; NO_IRQ is not
+	 * defined on all architectures, so test for 0 directly.
+	 */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/*
+ * Unregister from dmaengine and the OF DMA helpers; the IRQ and MMIO
+ * mapping are devm-managed and released automatically.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+/* Match the controller by its devicetree compatible string. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }	/* sentinel */
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time so the DMA engine is available
+ * early, before client drivers probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
  2013-12-16 10:24                             ` Jonas Jensen
@ 2014-01-17  8:46                               ` Jonas Jensen
  -1 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2014-01-17  8:46 UTC (permalink / raw)
  To: dmaengine
  Cc: linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw, arnd,
	linux, mark.rutland, andriy.shevchenko, lars, Jonas Jensen

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v15:
    1. rebase drivers/dma/Kconfig to next-20140117
    
    Applies to next-20140117

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use the specific request line passed from the DMA controller.
+For example, the MMC request line is 5:
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9ae6f54..9bed1a2 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -342,6 +342,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0a6f08e..a029d0f4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+/* One contiguous DMA segment on the memory side. */
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+/* One transfer: direction, fixed device address and an inline sg list. */
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;
+	unsigned int			sglen;
+	/* cycle count programmed for the current sg entry */
+	unsigned int			dma_cycles;
+	struct virt_dma_desc		vd;
+	/* element size: index into es_bytes[] (MOXART_DMA_DATA_TYPE_*) */
+	uint8_t				es;
+	/* inline scatterlist copy; storage allocated past the struct */
+	struct moxart_sg		sg[0];
+};
+
+/* Per-channel state; vc embeds the dmaengine channel. */
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;
+	/* in-flight descriptor, NULL when the channel is idle */
+	struct moxart_desc		*desc;
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;
+	bool				error;
+	int				ch_num;
+	/* hardware handshake request line from the client's dma-spec */
+	unsigned int			line_reqno;
+	/* index of the sg entry currently programmed into the hardware */
+	unsigned int			sgidx;
+};
+
+/* Controller: the dmaengine device plus its four channels. */
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/* NOTE(review): appears unused in this file - candidate for removal. */
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Bytes per element, indexed by MOXART_DMA_DATA_TYPE_*. */
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* Device to use for channel-scoped log messages. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Container-of helper: the dma_chan is embedded in moxart_chan via vc. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* Container-of helper: the tx descriptor is embedded via vd.tx. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt-dma desc_free callback; descriptors are single kzalloc'd blocks. */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * moxart_terminate_all - stop the channel, disable its interrupts and
+ * free every descriptor, including the one in flight.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	if (ch->desc) {
+		/*
+		 * The in-flight descriptor was removed from the virt-dma
+		 * lists by moxart_dma_start_desc(), so nobody else will
+		 * free it: release it here to avoid a memory leak.
+		 */
+		moxart_dma_desc_free(&ch->desc->vd);
+		ch->desc = NULL;
+	}
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * moxart_slave_config - program the channel CTRL register from a
+ * dma_slave_config: burst mode, data width, memory-side address
+ * increment, APB/AHB bus select and the handshake request line.
+ * Returns -EINVAL for an unsupported bus width.
+ *
+ * NOTE(review): the width switch reads src_addr_width for both
+ * directions - confirm MEM_TO_DEV clients also set src_addr_width.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	/* Cache the config; prep_slave_sg reads it later. */
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/* Data width and per-cycle increment on the memory side. */
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Device end sits on APB (select bit clear), memory end on AHB
+	 * (select bit set); the request line goes with the device end.
+	 */
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+/*
+ * Dispatch a dmaengine control command to the matching channel
+ * operation. Pause/resume are not implemented by this controller
+ * (-EINVAL); unrecognized commands return -ENOSYS.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		/* arg carries a struct dma_slave_config * per dmaengine ABI */
+		return moxart_slave_config(chan,
+					   (struct dma_slave_config *)arg);
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		return 0;
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	default:
+		return -ENOSYS;
+	}
+}
+
+/*
+ * moxart_prep_slave_sg - build a slave-DMA descriptor for a scatterlist.
+ *
+ * Validates direction and the configured device bus width, then copies
+ * the DMA address/length of every sg entry into a moxart_desc whose
+ * sg[] array is allocated inline after the struct.
+ * Returns NULL on invalid parameters or allocation failure.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned int i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	/* The fixed (device-side) address and width come from the config. */
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/* GFP_ATOMIC: prep callbacks may run in non-sleeping context. */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * Translate a devicetree dma-spec into a channel: hand out any free
+ * channel and record the client's request line number (cell 0) on it.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *c = dma_get_any_slave_channel(&mdc->dma_slave);
+
+	if (!c)
+		return NULL;
+
+	to_moxart_dma_chan(c)->line_reqno = dma_spec->args[0];
+
+	return c;
+}
+
+/*
+ * Mark the channel in use; the shared IRQ handler only services
+ * channels with ->allocated set.
+ */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/* Release virt-dma resources and mark the channel idle for the IRQ loop. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/* Program the source and destination bus addresses for the next transfer. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/*
+ * moxart_set_transfer_params - program the cycle count for @len bytes.
+ *
+ * NOTE(review): es_bytes[] holds element sizes in bytes (1/2/4) but is
+ * used here as a shift amount; only the 4-byte case matches the burst
+ * increments documented above - confirm against the APB DMA datasheet.
+ */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+/* Enable the channel together with its completion and error interrupts. */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/*
+ * Kick off one scatterlist entry: the device keeps its fixed address
+ * while the memory side takes the sg entry's address and length.
+ */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * Take the next issued descriptor off the virt-dma list and start its
+ * first sg entry; ch->desc is cleared when nothing is queued.
+ * Called with ch->vc.lock held.
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	/* Detach from the issued list; completion is signalled from the IRQ. */
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/* Move submitted descriptors to the issued list; start the channel if idle. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/*
+ * moxart_dma_desc_size - residue in bytes of a descriptor, counting
+ * only the scatterlist entries from @completed_sgs onward.
+ */
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+				   unsigned int completed_sgs)
+{
+	unsigned int i;
+	size_t size;
+
+	/*
+	 * The accumulator must start at zero: the previous code seeded it
+	 * with the index value "completed_sgs" (size = i = completed_sgs),
+	 * inflating the reported residue by that many bytes.
+	 */
+	for (size = 0, i = completed_sgs; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+/*
+ * Residue of the active descriptor: the not-yet-started sg entries plus
+ * whatever the hardware still has to move in the current one.
+ *
+ * NOTE(review): assumes REG_OFF_CYCLES counts down while the transfer
+ * runs, and reuses es_bytes[] as a shift amount exactly like
+ * moxart_set_transfer_params() - confirm both against the datasheet.
+ */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * moxart_tx_status - report cookie state plus a residue estimate: a
+ * still-queued descriptor counts in full, the in-flight one is measured
+ * against the hardware cycle counter. A flagged channel error overrides
+ * the cookie status with DMA_ERROR.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		/* Still queued: the whole descriptor remains. */
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d, 0);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		/* In flight: ask the hardware how much is left. */
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/* Populate the dma_device callback table shared by all channels. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * moxart_dma_interrupt - shared handler for all four channels.
+ *
+ * On completion, advance to the next sg entry, or complete the
+ * descriptor and start the next queued one. On error, only flag the
+ * channel; the error is reported later through moxart_tx_status().
+ * Handled status bits are cleared in the value written back to CTRL.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		/* Only channels handed out to clients are serviced. */
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * moxart_probe - map the controller, initialize the four channels,
+ * register with dmaengine and the OF DMA helpers, and install the
+ * shared interrupt handler.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/*
+	 * Plain automatic variable: this was previously declared "static",
+	 * which would share one mapping across probe calls and break if
+	 * more than one controller instance were ever probed.
+	 */
+	void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * irq_of_parse_and_map() returns 0 on failure; NO_IRQ is not
+	 * defined on all architectures, so test for 0 directly.
+	 */
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/*
+ * Unregister from dmaengine and the OF DMA helpers; the IRQ and MMIO
+ * mapping are devm-managed and released automatically.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+/* Match the controller by its devicetree compatible string. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }	/* sentinel */
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+/*
+ * Registered at subsys_initcall time so the DMA engine is available
+ * early, before client drivers probe.
+ */
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1


^ permalink raw reply related	[flat|nested] 80+ messages in thread

* [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
@ 2014-01-17  8:46                               ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2014-01-17  8:46 UTC (permalink / raw)
  To: linux-arm-kernel

The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
---

Notes:
    Changes since v15:
    1. rebase drivers/dma/Kconfig to next-20140117
    
    Applies to next-20140117

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use the specific request line passed from the DMA controller.
+For example, the MMC request line is 5:
+
+	sdhci: sdhci at 98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9ae6f54..9bed1a2 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -342,6 +342,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0a6f08e..a029d0f4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL			4
+
+#define REG_OFF_ADDRESS_SOURCE			0
+#define REG_OFF_ADDRESS_DEST			4
+#define REG_OFF_CYCLES				8
+#define REG_OFF_CTRL				12
+#define REG_OFF_CHAN_SIZE			16
+
+#define APB_DMA_ENABLE				BIT(0)
+#define APB_DMA_FIN_INT_STS			BIT(1)
+#define APB_DMA_FIN_INT_EN			BIT(2)
+#define APB_DMA_BURST_MODE			BIT(3)
+#define APB_DMA_ERR_INT_STS			BIT(4)
+#define APB_DMA_ERR_INT_EN			BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT			0x40
+#define APB_DMA_DEST_SELECT			0x80
+
+#define APB_DMA_SOURCE				0x100
+#define APB_DMA_DEST				0x1000
+
+#define APB_DMA_SOURCE_MASK			0x700
+#define APB_DMA_DEST_MASK			0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0			0
+#define APB_DMA_SOURCE_INC_1_4			0x100
+#define APB_DMA_SOURCE_INC_2_8			0x200
+#define APB_DMA_SOURCE_INC_4_16			0x300
+#define APB_DMA_SOURCE_DEC_1_4			0x500
+#define APB_DMA_SOURCE_DEC_2_8			0x600
+#define APB_DMA_SOURCE_DEC_4_16			0x700
+#define APB_DMA_DEST_INC_0			0
+#define APB_DMA_DEST_INC_1_4			0x1000
+#define APB_DMA_DEST_INC_2_8			0x2000
+#define APB_DMA_DEST_INC_4_16			0x3000
+#define APB_DMA_DEST_DEC_1_4			0x5000
+#define APB_DMA_DEST_DEC_2_8			0x6000
+#define APB_DMA_DEST_DEC_4_16			0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO			0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
+#define APB_DMA_DEST_REQ_NO			0x10000
+#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
+
+#define APB_DMA_DATA_WIDTH			0x100000
+#define APB_DMA_DATA_WIDTH_MASK			0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4			0
+#define APB_DMA_DATA_WIDTH_2			0x100000
+#define APB_DMA_DATA_WIDTH_1			0x200000
+
+#define APB_DMA_CYCLES_MASK			0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8			0x00
+#define MOXART_DMA_DATA_TYPE_S16		0x01
+#define MOXART_DMA_DATA_TYPE_S32		0x02
+
+/* One scatter/gather segment: bus address and length in bytes. */
+struct moxart_sg {
+	dma_addr_t addr;
+	uint32_t len;
+};
+
+/* Software descriptor for one slave transfer (embeds a virt_dma_desc). */
+struct moxart_desc {
+	enum dma_transfer_direction	dma_dir;
+	dma_addr_t			dev_addr;	/* fixed device-side address */
+	unsigned int			sglen;		/* number of entries in sg[] */
+	unsigned int			dma_cycles;	/* cycles programmed for current sg */
+	struct virt_dma_desc		vd;
+	uint8_t				es;		/* element size, MOXART_DMA_DATA_TYPE_* */
+	/* NOTE(review): pre-C99 zero-length array; C99 flexible array "sg[]" preferred */
+	struct moxart_sg		sg[0];
+};
+
+/* Per-channel state; one register window of REG_OFF_CHAN_SIZE bytes each. */
+struct moxart_chan {
+	struct virt_dma_chan		vc;
+
+	void __iomem			*base;		/* channel register base */
+	struct moxart_desc		*desc;		/* in-flight descriptor, or NULL */
+
+	struct dma_slave_config		cfg;
+
+	bool				allocated;	/* claimed by a client */
+	bool				error;		/* set by IRQ handler on error status */
+	int				ch_num;
+	unsigned int			line_reqno;	/* handshake request line from DT */
+	unsigned int			sgidx;		/* index of sg entry being transferred */
+};
+
+/* Controller instance: dmaengine device plus its fixed set of channels. */
+struct moxart_dmadev {
+	struct dma_device		dma_slave;
+	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+/* NOTE(review): appears unused in this file — candidate for removal. */
+struct moxart_filter_data {
+	struct moxart_dmadev		*mdc;
+	struct of_phandle_args		*dma_spec;
+};
+
+/* Element size in bytes, indexed by MOXART_DMA_DATA_TYPE_*. */
+static const unsigned int es_bytes[] = {
+	[MOXART_DMA_DATA_TYPE_S8] = 1,
+	[MOXART_DMA_DATA_TYPE_S16] = 2,
+	[MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+/* Return the struct device used for diagnostics on this channel. */
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* Upcast from the embedded virt_dma_chan's dma_chan to our channel. */
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct moxart_chan, vc.chan);
+}
+
+/* Upcast from a tx descriptor to the containing moxart_desc. */
+static inline struct moxart_desc *to_moxart_dma_desc(
+	struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct moxart_desc, vd.tx);
+}
+
+/* virt-dma desc_free callback: descriptors are a single kzalloc'd chunk. */
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+/*
+ * Stop the channel and free every queued descriptor.
+ *
+ * Disables the channel enable bit and both interrupt enables, drops the
+ * in-flight descriptor reference, then frees all descriptors collected
+ * from the virt-dma lists outside the lock.
+ */
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+	u32 ctrl;
+
+	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+
+	/* NOTE(review): the "if" guard is redundant; plain "ch->desc = NULL" suffices */
+	if (ch->desc)
+		ch->desc = NULL;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	vchan_get_all_descriptors(&ch->vc, &head);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+	/* Free outside the lock: desc_free may sleep-free large allocations. */
+	vchan_dma_desc_free_list(&ch->vc, &head);
+
+	return 0;
+}
+
+/*
+ * Apply a dma_slave_config to the channel control register.
+ *
+ * Programs burst mode, data width, address increment, APB/AHB bus select
+ * and the hardware handshake request line.  Returns -EINVAL for an
+ * unsupported bus width.
+ */
+static int moxart_slave_config(struct dma_chan *chan,
+			       struct dma_slave_config *cfg)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	u32 ctrl;
+
+	ch->cfg = *cfg;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= APB_DMA_BURST_MODE;
+	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+	/*
+	 * NOTE(review): only src_addr_width is consulted even for
+	 * DMA_MEM_TO_DEV, where dst_addr_width would normally apply —
+	 * verify clients always set src_addr_width, or use the
+	 * direction-appropriate field.
+	 */
+	switch (ch->cfg.src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		ctrl |= APB_DMA_DATA_WIDTH_1;
+		/* Increment the memory-side address; device side stays fixed. */
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_1_4;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_1_4;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		ctrl |= APB_DMA_DATA_WIDTH_2;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_2_8;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_2_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		/* Width field value 00 means 4-byte (word) transfers. */
+		ctrl &= ~APB_DMA_DATA_WIDTH;
+		if (ch->cfg.direction != DMA_MEM_TO_DEV)
+			ctrl |= APB_DMA_DEST_INC_4_16;
+		else
+			ctrl |= APB_DMA_SOURCE_INC_4_16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+		/* Memory (AHB) source, peripheral (APB) destination. */
+		ctrl &= ~APB_DMA_DEST_SELECT;
+		ctrl |= APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 16 &
+			 APB_DMA_DEST_REQ_NO_MASK);
+	} else {
+		/* Peripheral (APB) source, memory (AHB) destination. */
+		ctrl |= APB_DMA_DEST_SELECT;
+		ctrl &= ~APB_DMA_SOURCE_SELECT;
+		ctrl |= (ch->line_reqno << 24 &
+			 APB_DMA_SOURCE_REQ_NO_MASK);
+	}
+
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+
+	return 0;
+}
+
+/*
+ * dmaengine device_control entry point (pre-4.0 API).
+ *
+ * PAUSE/RESUME are unsupported (-EINVAL); unknown commands return
+ * -ENOSYS.  SLAVE_CONFIG passes "arg" through as a dma_slave_config
+ * pointer per the dmaengine convention of that era.
+ */
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			  unsigned long arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case DMA_PAUSE:
+	case DMA_RESUME:
+		return -EINVAL;
+	case DMA_TERMINATE_ALL:
+		moxart_terminate_all(chan);
+		break;
+	case DMA_SLAVE_CONFIG:
+		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+/*
+ * Build a slave scatter/gather descriptor.
+ *
+ * Copies the (already DMA-mapped) sg addresses/lengths into a single
+ * kzalloc'd moxart_desc and hands it to virt-dma.  Returns NULL on
+ * invalid direction, unsupported bus width, or allocation failure.
+ */
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct moxart_desc *d;
+	enum dma_slave_buswidth dev_width;
+	dma_addr_t dev_addr;
+	struct scatterlist *sgent;
+	unsigned int es;
+	unsigned int i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+			__func__);
+		return NULL;
+	}
+
+	/* Device-side address/width comes from the slave config by direction. */
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = ch->cfg.src_addr;
+		dev_width = ch->cfg.src_addr_width;
+	} else {
+		dev_addr = ch->cfg.dst_addr;
+		dev_width = ch->cfg.dst_addr_width;
+	}
+
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = MOXART_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = MOXART_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+			__func__, dev_width);
+		return NULL;
+	}
+
+	/* GFP_ATOMIC: prep callbacks may run in non-sleepable context. */
+	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dma_dir = dir;
+	d->dev_addr = dev_addr;
+	d->es = es;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		d->sg[i].addr = sg_dma_address(sgent);
+		d->sg[i].len = sg_dma_len(sgent);
+	}
+
+	d->sglen = sg_len;
+
+	ch->error = 0;
+
+	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+/*
+ * DT translation: grab any free channel and record the request line
+ * (dma-cells[0]) used for hardware handshaking on that channel.
+ */
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
+{
+	struct moxart_dmadev *mdc = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct moxart_chan *ch;
+
+	chan = dma_get_any_slave_channel(&mdc->dma_slave);
+	if (!chan)
+		return NULL;
+
+	ch = to_moxart_dma_chan(chan);
+	ch->line_reqno = dma_spec->args[0];
+
+	return chan;
+}
+
+/* Mark the channel in use so the shared IRQ handler services it. */
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 1;
+
+	return 0;
+}
+
+/* Release virt-dma resources and mark the channel free again. */
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+	vchan_free_chan_resources(&ch->vc);
+
+	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+		__func__, ch->ch_num);
+	ch->allocated = 0;
+}
+
+/* Program the channel's source and destination bus addresses. */
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+				  dma_addr_t dst_addr)
+{
+	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+/* Program the cycle count for a segment of "len" bytes. */
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+	struct moxart_desc *d = ch->desc;
+	unsigned int sglen_div = es_bytes[d->es];
+
+	/*
+	 * NOTE(review): this shifts by the element size in BYTES (1/2/4),
+	 * i.e. divides len by 2/4/16 rather than by the element size —
+	 * for len/es one would expect "len / es_bytes[d->es]" or a shift
+	 * by log2 of the width.  Verify against the APB DMA datasheet.
+	 */
+	d->dma_cycles = len >> sglen_div;
+
+	/*
+	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+	 * bytes ( when width is APB_DMA_DATA_WIDTH_4 ).
+	 */
+	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+		__func__, d->dma_cycles, len);
+}
+
+/* Kick the channel: enable it together with completion/error interrupts. */
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+	u32 ctrl;
+
+	ctrl = readl(ch->base + REG_OFF_CTRL);
+	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+	writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+/* Program and start the idx'th scatter/gather segment of the current desc. */
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+	struct moxart_desc *d = ch->desc;
+	struct moxart_sg *sg = ch->desc->sg + idx;
+
+	/* Memory side comes from the sg entry; device side is fixed. */
+	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+	moxart_set_transfer_params(ch, sg->len);
+
+	moxart_start_dma(ch);
+}
+
+/*
+ * Dequeue the next issued descriptor and start its first segment.
+ * Caller must hold ch->vc.lock (issue_pending and the IRQ handler do).
+ */
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&ch->vc);
+
+	if (!vd) {
+		ch->desc = NULL;
+		return;
+	}
+
+	/* Detach from the issued list; completion is reported via vd later. */
+	list_del(&vd->node);
+
+	ch->desc = to_moxart_dma_desc(&vd->tx);
+	ch->sgidx = 0;
+
+	moxart_dma_start_sg(ch, 0);
+}
+
+/* dmaengine issue_pending: start hardware if the channel is idle. */
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	if (vchan_issue_pending(&ch->vc) && !ch->desc)
+		moxart_dma_start_desc(chan);
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+/*
+ * Sum the byte lengths of the not-yet-completed sg entries of "d",
+ * i.e. the residue assuming "completed_sgs" entries already finished.
+ */
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+				   unsigned int completed_sgs)
+{
+	unsigned int i;
+	size_t size;
+
+	/*
+	 * NOTE(review): "size = i = completed_sgs" seeds the byte total
+	 * with the sg INDEX, not 0 — looks like it should be
+	 * "size = 0, i = completed_sgs".  Confirm intended residue math.
+	 */
+	for (size = i = completed_sgs; i < d->sglen; i++)
+		size += d->sg[i].len;
+
+	return size;
+}
+
+/* Residue of the in-flight descriptor, refined by the hardware cycle count. */
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+	size_t size;
+	unsigned int completed_cycles, cycles;
+
+	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+	cycles = readl(ch->base + REG_OFF_CYCLES);
+	completed_cycles = (ch->desc->dma_cycles - cycles);
+	/*
+	 * NOTE(review): shifting by es_bytes (1/2/4) multiplies by 2/4/16,
+	 * not by the element size in bytes — "completed_cycles *
+	 * es_bytes[...]" looks intended.  Same concern as the cycle setup.
+	 */
+	size -= completed_cycles << es_bytes[ch->desc->es];
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+	return size;
+}
+
+/*
+ * dmaengine tx_status: cookie status plus a residue estimate.
+ *
+ * A still-queued descriptor reports its full size; the in-flight one
+ * reports the hardware-refined residue.  A latched channel error
+ * overrides the cookie status with DMA_ERROR.
+ */
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *txstate)
+{
+	struct moxart_chan *ch = to_moxart_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	struct moxart_desc *d;
+	enum dma_status ret;
+	unsigned long flags;
+
+	/*
+	 * dma_cookie_status() assigns initial residue value.
+	 */
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	spin_lock_irqsave(&ch->vc.lock, flags);
+	vd = vchan_find_desc(&ch->vc, cookie);
+	if (vd) {
+		d = to_moxart_dma_desc(&vd->tx);
+		txstate->residue = moxart_dma_desc_size(d, 0);
+	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+		txstate->residue = moxart_dma_desc_size_in_flight(ch);
+	}
+	spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+	/* NOTE(review): ch->error is per-channel, not per-cookie — verify. */
+	if (ch->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/* Populate the dma_device callback table and channel list head. */
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
+	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
+	dma->device_free_chan_resources		= moxart_free_chan_resources;
+	dma->device_issue_pending		= moxart_issue_pending;
+	dma->device_tx_status			= moxart_tx_status;
+	dma->device_control			= moxart_control;
+	dma->dev				= dev;
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/*
+ * Shared IRQ handler for all channels.
+ *
+ * Polls every allocated channel's control register; on completion it
+ * advances to the next sg segment or completes the descriptor and
+ * starts the next one.  Error status latches ch->error for tx_status.
+ */
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+	struct moxart_dmadev *mc = devid;
+	struct moxart_chan *ch = &mc->slave_chans[0];
+	unsigned int i;
+	unsigned long flags;
+	u32 ctrl;
+
+	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		if (!ch->allocated)
+			continue;
+
+		ctrl = readl(ch->base + REG_OFF_CTRL);
+
+		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+			__func__, ch, ch->base, ctrl);
+
+		if (ctrl & APB_DMA_FIN_INT_STS) {
+			/* Clear the bit in our copy; written back below. */
+			ctrl &= ~APB_DMA_FIN_INT_STS;
+			if (ch->desc) {
+				spin_lock_irqsave(&ch->vc.lock, flags);
+				if (++ch->sgidx < ch->desc->sglen) {
+					moxart_dma_start_sg(ch, ch->sgidx);
+				} else {
+					vchan_cookie_complete(&ch->desc->vd);
+					moxart_dma_start_desc(&ch->vc.chan);
+				}
+				spin_unlock_irqrestore(&ch->vc.lock, flags);
+			}
+		}
+
+		if (ctrl & APB_DMA_ERR_INT_STS) {
+			ctrl &= ~APB_DMA_ERR_INT_STS;
+			ch->error = 1;
+		}
+
+		/*
+		 * NOTE(review): status bits seem to be cleared by writing
+		 * them back as 0 — confirm against the datasheet (vs W1C).
+		 */
+		writel(ctrl, ch->base + REG_OFF_CTRL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map registers, initialize the four channels, request the
+ * shared IRQ, and register with both dmaengine and the OF DMA layer.
+ */
+static int moxart_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+	/*
+	 * NOTE(review): "static" puts this per-device pointer in static
+	 * storage, shared across all probed instances — looks unintended;
+	 * a plain local would be correct.
+	 */
+	static void __iomem *dma_base_addr;
+	int ret, i;
+	unsigned int irq;
+	struct moxart_chan *ch;
+	struct moxart_dmadev *mdc;
+
+	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+	if (!mdc) {
+		dev_err(dev, "can't allocate DMA container\n");
+		return -ENOMEM;
+	}
+
+	irq = irq_of_parse_and_map(node, 0);
+	/* NOTE(review): NO_IRQ comparison is deprecated; 0 means "no IRQ". */
+	if (irq == NO_IRQ) {
+		dev_err(dev, "no IRQ resource\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dma_base_addr))
+		return PTR_ERR(dma_base_addr);
+
+	dma_cap_zero(mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+	moxart_dma_init(&mdc->dma_slave, dev);
+
+	/* Channel register windows are packed REG_OFF_CHAN_SIZE bytes apart. */
+	ch = &mdc->slave_chans[0];
+	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+		ch->ch_num = i;
+		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+		ch->allocated = 0;
+
+		ch->vc.desc_free = moxart_dma_desc_free;
+		vchan_init(&ch->vc, &mdc->dma_slave);
+
+		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+			__func__, i, ch->ch_num, ch->base);
+	}
+
+	platform_set_drvdata(pdev, mdc);
+
+	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+			       "moxart-dma-engine", mdc);
+	if (ret) {
+		dev_err(dev, "devm_request_irq failed\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(&mdc->dma_slave);
+	if (ret) {
+		dev_err(dev, "dma_async_device_register failed\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+	if (ret) {
+		dev_err(dev, "of_dma_controller_register failed\n");
+		/* Unwind the dmaengine registration on OF failure. */
+		dma_async_device_unregister(&mdc->dma_slave);
+		return ret;
+	}
+
+	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+	return 0;
+}
+
+/*
+ * Remove: unregister from dmaengine and the OF DMA layer.
+ *
+ * NOTE(review): the devm-managed IRQ is released only after this
+ * returns, i.e. after unregistration — consider freeing/disabling the
+ * IRQ first so the handler cannot run against a torn-down device.
+ */
+static int moxart_remove(struct platform_device *pdev)
+{
+	struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&m->dma_slave);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	return 0;
+}
+
+/* DT match table; no MODULE_DEVICE_TABLE since loading is via initcall. */
+static const struct of_device_id moxart_dma_match[] = {
+	{ .compatible = "moxa,moxart-dma" },
+	{ }
+};
+
+static struct platform_driver moxart_driver = {
+	.probe	= moxart_probe,
+	.remove	= moxart_remove,
+	.driver = {
+		.name		= "moxart-dma-engine",
+		.owner		= THIS_MODULE,
+		.of_match_table	= moxart_dma_match,
+	},
+};
+
+static int moxart_init(void)
+{
+	return platform_driver_register(&moxart_driver);
+}
+/* subsys_initcall: DMA must be up before client drivers (e.g. MMC) probe. */
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+	platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
-- 
1.8.2.1

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Fwd: [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
       [not found]                               ` <1389948365-13999-1-git-send-email-jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2014-01-17 13:29                                 ` Jonas Jensen
  0 siblings, 0 replies; 80+ messages in thread
From: Jonas Jensen @ 2014-01-17 13:29 UTC (permalink / raw)
  To: devicetree-u79uwXL29TY76Z2rM5mHXA

Forwarded to devicetree-u79uwXL29TY76Z2rM5mHXA@public.gmane.org because I forgot CC.


---------- Forwarded message ----------
From: Jonas Jensen <jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
Date: 17 January 2014 09:46
Subject: [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
To: dmaengine-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: linux-arm-kernel-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org,
linux-kernel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org, arm-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org, vinod.koul-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org,
djbw-b10kYP2dOMg@public.gmane.org, arnd-r2nGTMty4D4@public.gmane.org, linux-lFZ/pmaqli7XmaaqVzeoHQ@public.gmane.org,
mark.rutland-5wv7dgnIgG8@public.gmane.org, andriy.shevchenko-VuQAYsv1563Yd54FQh9/CA@public.gmane.org,
lars-Qo5EllUWu/uELgA04lAiVw@public.gmane.org, Jonas Jensen <jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>


The MOXA ART SoC has a DMA controller capable of offloading expensive
memory operations, such as large copies. This patch adds support for
the controller including four channels. Two of these are used to
handle MMC copy on the UC-7112-LX hardware. The remaining two can be
used in a future audio driver or client application.

Signed-off-by: Jonas Jensen <jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---

Notes:
    Changes since v15:
    1. rebase drivers/dma/Kconfig to next-20140117

    Applies to next-20140117

 .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
 drivers/dma/Kconfig                                |   8 +
 drivers/dma/Makefile                               |   1 +
 drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
 4 files changed, 753 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
 create mode 100644 drivers/dma/moxart-dma.c

diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 0000000..8a9f355
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg :                Should contain registers location and length
+- interrupts : Should contain an interrupt-specifier for the sole
+               interrupt generated by the device
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+       dma: dma@90500000 {
+               compatible = "moxa,moxart-dma";
+               reg = <0x90500080 0x40>;
+               interrupts = <24 0>;
+               #dma-cells = <1>;
+       };
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use the specific request line passed from the DMA controller.
+For example, the MMC request line is 5:
+
+       sdhci: sdhci@98e00000 {
+               compatible = "moxa,moxart-sdhci";
+               reg = <0x98e00000 0x5C>;
+               interrupts = <5 0>;
+               clocks = <&clk_apb>;
+               dmas =  <&dma 5>,
+                       <&dma 5>;
+               dma-names = "tx", "rx";
+       };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9ae6f54..9bed1a2 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -342,6 +342,14 @@ config K3_DMA
          Support the DMA engine for Hisilicon K3 platform
          devices.

+config MOXART_DMA
+       tristate "MOXART DMA support"
+       depends on ARCH_MOXART
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
        bool

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0a6f08e..a029d0f4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 0000000..3258e48
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL                    4
+
+#define REG_OFF_ADDRESS_SOURCE                 0
+#define REG_OFF_ADDRESS_DEST                   4
+#define REG_OFF_CYCLES                         8
+#define REG_OFF_CTRL                           12
+#define REG_OFF_CHAN_SIZE                      16
+
+#define APB_DMA_ENABLE                         BIT(0)
+#define APB_DMA_FIN_INT_STS                    BIT(1)
+#define APB_DMA_FIN_INT_EN                     BIT(2)
+#define APB_DMA_BURST_MODE                     BIT(3)
+#define APB_DMA_ERR_INT_STS                    BIT(4)
+#define APB_DMA_ERR_INT_EN                     BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT                  0x40
+#define APB_DMA_DEST_SELECT                    0x80
+
+#define APB_DMA_SOURCE                         0x100
+#define APB_DMA_DEST                           0x1000
+
+#define APB_DMA_SOURCE_MASK                    0x700
+#define APB_DMA_DEST_MASK                      0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0                   0
+#define APB_DMA_SOURCE_INC_1_4                 0x100
+#define APB_DMA_SOURCE_INC_2_8                 0x200
+#define APB_DMA_SOURCE_INC_4_16                        0x300
+#define APB_DMA_SOURCE_DEC_1_4                 0x500
+#define APB_DMA_SOURCE_DEC_2_8                 0x600
+#define APB_DMA_SOURCE_DEC_4_16                        0x700
+#define APB_DMA_DEST_INC_0                     0
+#define APB_DMA_DEST_INC_1_4                   0x1000
+#define APB_DMA_DEST_INC_2_8                   0x2000
+#define APB_DMA_DEST_INC_4_16                  0x3000
+#define APB_DMA_DEST_DEC_1_4                   0x5000
+#define APB_DMA_DEST_DEC_2_8                   0x6000
+#define APB_DMA_DEST_DEC_4_16                  0x7000
+
+/*
+ * Request signal select source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO                  0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK             0xf000000
+#define APB_DMA_DEST_REQ_NO                    0x10000
+#define APB_DMA_DEST_REQ_NO_MASK               0xf0000
+
+#define APB_DMA_DATA_WIDTH                     0x100000
+#define APB_DMA_DATA_WIDTH_MASK                        0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4                   0
+#define APB_DMA_DATA_WIDTH_2                   0x100000
+#define APB_DMA_DATA_WIDTH_1                   0x200000
+
+#define APB_DMA_CYCLES_MASK                    0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8                        0x00
+#define MOXART_DMA_DATA_TYPE_S16               0x01
+#define MOXART_DMA_DATA_TYPE_S32               0x02
+
+struct moxart_sg {
+       dma_addr_t addr;
+       uint32_t len;
+};
+
+struct moxart_desc {
+       enum dma_transfer_direction     dma_dir;
+       dma_addr_t                      dev_addr;
+       unsigned int                    sglen;
+       unsigned int                    dma_cycles;
+       struct virt_dma_desc            vd;
+       uint8_t                         es;
+       struct moxart_sg                sg[0];
+};
+
+struct moxart_chan {
+       struct virt_dma_chan            vc;
+
+       void __iomem                    *base;
+       struct moxart_desc              *desc;
+
+       struct dma_slave_config         cfg;
+
+       bool                            allocated;
+       bool                            error;
+       int                             ch_num;
+       unsigned int                    line_reqno;
+       unsigned int                    sgidx;
+};
+
+struct moxart_dmadev {
+       struct dma_device               dma_slave;
+       struct moxart_chan              slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+       struct moxart_dmadev            *mdc;
+       struct of_phandle_args          *dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+       [MOXART_DMA_DATA_TYPE_S8] = 1,
+       [MOXART_DMA_DATA_TYPE_S16] = 2,
+       [MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+       struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+       u32 ctrl;
+
+       dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+
+       if (ch->desc)
+               ch->desc = NULL;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       vchan_get_all_descriptors(&ch->vc, &head);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+       vchan_dma_desc_free_list(&ch->vc, &head);
+
+       return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+                              struct dma_slave_config *cfg)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       u32 ctrl;
+
+       ch->cfg = *cfg;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= APB_DMA_BURST_MODE;
+       ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+       ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+       switch (ch->cfg.src_addr_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               ctrl |= APB_DMA_DATA_WIDTH_1;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_1_4;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_1_4;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               ctrl |= APB_DMA_DATA_WIDTH_2;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_2_8;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_2_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               ctrl &= ~APB_DMA_DATA_WIDTH;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_4_16;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_4_16;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+               ctrl &= ~APB_DMA_DEST_SELECT;
+               ctrl |= APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 16 &
+                        APB_DMA_DEST_REQ_NO_MASK);
+       } else {
+               ctrl |= APB_DMA_DEST_SELECT;
+               ctrl &= ~APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 24 &
+                        APB_DMA_SOURCE_REQ_NO_MASK);
+       }
+
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                         unsigned long arg)
+{
+       int ret = 0;
+
+       switch (cmd) {
+       case DMA_PAUSE:
+       case DMA_RESUME:
+               return -EINVAL;
+       case DMA_TERMINATE_ALL:
+               moxart_terminate_all(chan);
+               break;
+       case DMA_SLAVE_CONFIG:
+               ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl,
+       unsigned int sg_len, enum dma_transfer_direction dir,
+       unsigned long tx_flags, void *context)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct moxart_desc *d;
+       enum dma_slave_buswidth dev_width;
+       dma_addr_t dev_addr;
+       struct scatterlist *sgent;
+       unsigned int es;
+       unsigned int i;
+
+       if (!is_slave_direction(dir)) {
+               dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+                       __func__);
+               return NULL;
+       }
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = ch->cfg.src_addr;
+               dev_width = ch->cfg.src_addr_width;
+       } else {
+               dev_addr = ch->cfg.dst_addr;
+               dev_width = ch->cfg.dst_addr_width;
+       }
+
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               es = MOXART_DMA_DATA_TYPE_S8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+                       __func__, dev_width);
+               return NULL;
+       }
+
+       d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       d->dma_dir = dir;
+       d->dev_addr = dev_addr;
+       d->es = es;
+
+       for_each_sg(sgl, sgent, sg_len, i) {
+               d->sg[i].addr = sg_dma_address(sgent);
+               d->sg[i].len = sg_dma_len(sgent);
+       }
+
+       d->sglen = sg_len;
+
+       ch->error = 0;
+
+       return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+                                       struct of_dma *ofdma)
+{
+       struct moxart_dmadev *mdc = ofdma->of_dma_data;
+       struct dma_chan *chan;
+       struct moxart_chan *ch;
+
+       chan = dma_get_any_slave_channel(&mdc->dma_slave);
+       if (!chan)
+               return NULL;
+
+       ch = to_moxart_dma_chan(chan);
+       ch->line_reqno = dma_spec->args[0];
+
+       return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 1;
+
+       return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       vchan_free_chan_resources(&ch->vc);
+
+       dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 0;
+}
+
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+                                 dma_addr_t dst_addr)
+{
+       writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+       writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+       struct moxart_desc *d = ch->desc;
+       unsigned int sglen_div = es_bytes[d->es];
+
+       d->dma_cycles = len >> sglen_div;
+
+       /*
+        * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+        * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ).
+        */
+       writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+               __func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+       u32 ctrl;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+       struct moxart_desc *d = ch->desc;
+       struct moxart_sg *sg = ch->desc->sg + idx;
+
+       if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+               moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+       else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+               moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+       moxart_set_transfer_params(ch, sg->len);
+
+       moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&ch->vc);
+
+       if (!vd) {
+               ch->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       ch->desc = to_moxart_dma_desc(&vd->tx);
+       ch->sgidx = 0;
+
+       moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       if (vchan_issue_pending(&ch->vc) && !ch->desc)
+               moxart_dma_start_desc(chan);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+                                  unsigned int completed_sgs)
+{
+       unsigned int i;
+       size_t size;
+
+       for (size = i = completed_sgs; i < d->sglen; i++)
+               size += d->sg[i].len;
+
+       return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+       size_t size;
+       unsigned int completed_cycles, cycles;
+
+       size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+       cycles = readl(ch->base + REG_OFF_CYCLES);
+       completed_cycles = (ch->desc->dma_cycles - cycles);
+       size -= completed_cycles << es_bytes[ch->desc->es];
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+       return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+                                       dma_cookie_t cookie,
+                                       struct dma_tx_state *txstate)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       struct moxart_desc *d;
+       enum dma_status ret;
+       unsigned long flags;
+
+       /*
+        * dma_cookie_status() assigns initial residue value.
+        */
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       vd = vchan_find_desc(&ch->vc, cookie);
+       if (vd) {
+               d = to_moxart_dma_desc(&vd->tx);
+               txstate->residue = moxart_dma_desc_size(d, 0);
+       } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+               txstate->residue = moxart_dma_desc_size_in_flight(ch);
+       }
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+       if (ch->error)
+               return DMA_ERROR;
+
+       return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+       dma->device_prep_slave_sg               = moxart_prep_slave_sg;
+       dma->device_alloc_chan_resources        = moxart_alloc_chan_resources;
+       dma->device_free_chan_resources         = moxart_free_chan_resources;
+       dma->device_issue_pending               = moxart_issue_pending;
+       dma->device_tx_status                   = moxart_tx_status;
+       dma->device_control                     = moxart_control;
+       dma->dev                                = dev;
+
+       INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+       struct moxart_dmadev *mc = devid;
+       struct moxart_chan *ch = &mc->slave_chans[0];
+       unsigned int i;
+       unsigned long flags;
+       u32 ctrl;
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               if (!ch->allocated)
+                       continue;
+
+               ctrl = readl(ch->base + REG_OFF_CTRL);
+
+               dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+                       __func__, ch, ch->base, ctrl);
+
+               if (ctrl & APB_DMA_FIN_INT_STS) {
+                       ctrl &= ~APB_DMA_FIN_INT_STS;
+                       if (ch->desc) {
+                               spin_lock_irqsave(&ch->vc.lock, flags);
+                               if (++ch->sgidx < ch->desc->sglen) {
+                                       moxart_dma_start_sg(ch, ch->sgidx);
+                               } else {
+                                       vchan_cookie_complete(&ch->desc->vd);
+                                       moxart_dma_start_desc(&ch->vc.chan);
+                               }
+                               spin_unlock_irqrestore(&ch->vc.lock, flags);
+                       }
+               }
+
+               if (ctrl & APB_DMA_ERR_INT_STS) {
+                       ctrl &= ~APB_DMA_ERR_INT_STS;
+                       ch->error = 1;
+               }
+
+               writel(ctrl, ch->base + REG_OFF_CTRL);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       static void __iomem *dma_base_addr;
+       int ret, i;
+       unsigned int irq;
+       struct moxart_chan *ch;
+       struct moxart_dmadev *mdc;
+
+       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+       if (!mdc) {
+               dev_err(dev, "can't allocate DMA container\n");
+               return -ENOMEM;
+       }
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq == NO_IRQ) {
+               dev_err(dev, "no IRQ resource\n");
+               return -EINVAL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dma_base_addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(dma_base_addr))
+               return PTR_ERR(dma_base_addr);
+
+       dma_cap_zero(mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+       moxart_dma_init(&mdc->dma_slave, dev);
+
+       ch = &mdc->slave_chans[0];
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               ch->ch_num = i;
+               ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+               ch->allocated = 0;
+
+               ch->vc.desc_free = moxart_dma_desc_free;
+               vchan_init(&ch->vc, &mdc->dma_slave);
+
+               dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+                       __func__, i, ch->ch_num, ch->base);
+       }
+
+       platform_set_drvdata(pdev, mdc);
+
+       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+                              "moxart-dma-engine", mdc);
+       if (ret) {
+               dev_err(dev, "devm_request_irq failed\n");
+               return ret;
+       }
+
+       ret = dma_async_device_register(&mdc->dma_slave);
+       if (ret) {
+               dev_err(dev, "dma_async_device_register failed\n");
+               return ret;
+       }
+
+       ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+       if (ret) {
+               dev_err(dev, "of_dma_controller_register failed\n");
+               dma_async_device_unregister(&mdc->dma_slave);
+               return ret;
+       }
+
+       dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+       return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+       struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&m->dma_slave);
+
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
+
+       return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+       { .compatible = "moxa,moxart-dma" },
+       { }
+};
+
+static struct platform_driver moxart_driver = {
+       .probe  = moxart_probe,
+       .remove = moxart_remove,
+       .driver = {
+               .name           = "moxart-dma-engine",
+               .owner          = THIS_MODULE,
+               .of_match_table = moxart_dma_match,
+       },
+};
+
+static int moxart_init(void)
+{
+       return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+       platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
--
1.8.2.1
--
To unsubscribe from this list: send the line "unsubscribe devicetree" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 80+ messages in thread

* Re: [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
  2014-01-17  8:46                               ` Jonas Jensen
@ 2014-01-17 14:42                                 ` Arnd Bergmann
  -1 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2014-01-17 14:42 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: dmaengine, linux-arm-kernel, linux-kernel, arm, vinod.koul, djbw,
	linux, mark.rutland, andriy.shevchenko, lars

On Friday 17 January 2014, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---

Acked-by: Arnd Bergmann <arnd@arndb.de>

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
@ 2014-01-17 14:42                                 ` Arnd Bergmann
  0 siblings, 0 replies; 80+ messages in thread
From: Arnd Bergmann @ 2014-01-17 14:42 UTC (permalink / raw)
  To: linux-arm-kernel

On Friday 17 January 2014, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>
> ---

Acked-by: Arnd Bergmann <arnd@arndb.de>

^ permalink raw reply	[flat|nested] 80+ messages in thread

* Re: [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
  2014-01-17  8:46                               ` Jonas Jensen
@ 2014-01-20  7:07                                 ` Vinod Koul
  -1 siblings, 0 replies; 80+ messages in thread
From: Vinod Koul @ 2014-01-20  7:07 UTC (permalink / raw)
  To: Jonas Jensen
  Cc: dmaengine, linux-arm-kernel, linux-kernel, arm, djbw, arnd,
	linux, mark.rutland, andriy.shevchenko, lars

On Fri, Jan 17, 2014 at 09:46:05AM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>

Applied thanks.

Though I wasn't able to find ARCH_MOXART in my next tree, it resolved after
using linux-next as the test tree, and I was able to compile-test it.

--
~Vinod
> ---
> 
> Notes:
>     Changes since v15:
>     1. rebase drivers/dma/Kconfig to next-20140117
>     
>     Applies to next-20140117
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
>  drivers/dma/Kconfig                                |   8 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
>  4 files changed, 753 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..8a9f355
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,45 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible :	Must be "moxa,moxart-dma"
> +- reg :		Should contain registers location and length
> +- interrupts :	Should contain an interrupt-specifier for the sole
> +		interrupt generated by the device
> +- #dma-cells :	Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +	dma: dma@90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500080 0x40>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};
> +
> +
> +Clients:
> +
> +DMA clients connected to the MOXA ART DMA controller must use the format
> +described in the dma.txt file, using a two-cell specifier for each channel:
> +a phandle plus one integer cell.
> +The two cells in order are:
> +
> +1. A phandle pointing to the DMA controller.
> +2. Peripheral identifier for the hardware handshaking interface.
> +
> +Example:
> +Use the specific request line passed from the DMA controller.
> +For example, the MMC request line is 5.
> +
> +	sdhci: sdhci@98e00000 {
> +		compatible = "moxa,moxart-sdhci";
> +		reg = <0x98e00000 0x5C>;
> +		interrupts = <5 0>;
> +		clocks = <&clk_apb>;
> +		dmas =  <&dma 5>,
> +			<&dma 5>;
> +		dma-names = "tx", "rx";
> +	};
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 9ae6f54..9bed1a2 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -342,6 +342,14 @@ config K3_DMA
>  	  Support the DMA engine for Hisilicon K3 platform
>  	  devices.
>  
> +config MOXART_DMA
> +	tristate "MOXART DMA support"
> +	depends on ARCH_MOXART
> +	select DMA_ENGINE
> +	select DMA_VIRTUAL_CHANNELS
> +	help
> +	  Enable support for the MOXA ART SoC DMA controller.
> +
>  config DMA_ENGINE
>  	bool
>  
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 0a6f08e..a029d0f4 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
>  obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
>  obj-$(CONFIG_TI_CPPI41) += cppi41.o
>  obj-$(CONFIG_K3_DMA) += k3dma.o
> +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
> diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
> new file mode 100644
> index 0000000..3258e48
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,699 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +#include <linux/bitops.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +#include "virt-dma.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_OFF_ADDRESS_SOURCE			0
> +#define REG_OFF_ADDRESS_DEST			4
> +#define REG_OFF_CYCLES				8
> +#define REG_OFF_CTRL				12
> +#define REG_OFF_CHAN_SIZE			16
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +
> +/*
> + * Unset: APB
> + * Set:   AHB
> + */
> +#define APB_DMA_SOURCE_SELECT			0x40
> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_DEST				0x1000
> +
> +#define APB_DMA_SOURCE_MASK			0x700
> +#define APB_DMA_DEST_MASK			0x7000
> +
> +/*
> + * 000: No increment
> + * 001: +1 (Burst=0), +4  (Burst=1)
> + * 010: +2 (Burst=0), +8  (Burst=1)
> + * 011: +4 (Burst=0), +16 (Burst=1)
> + * 101: -1 (Burst=0), -4  (Burst=1)
> + * 110: -2 (Burst=0), -8  (Burst=1)
> + * 111: -4 (Burst=0), -16 (Burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * Request signal select source/destination address for DMA hardware handshake.
> + *
> + * The request line number is a property of the DMA controller itself,
> + * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
> + *
> + * 0:    No request / Grant signal
> + * 1-15: Request    / Grant signal
> + */
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * Data width of transfer:
> + *
> + * 00: Word
> + * 01: Half
> + * 10: Byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +#define MOXART_DMA_DATA_TYPE_S8			0x00
> +#define MOXART_DMA_DATA_TYPE_S16		0x01
> +#define MOXART_DMA_DATA_TYPE_S32		0x02
> +
> +struct moxart_sg {
> +	dma_addr_t addr;
> +	uint32_t len;
> +};
> +
> +struct moxart_desc {
> +	enum dma_transfer_direction	dma_dir;
> +	dma_addr_t			dev_addr;
> +	unsigned int			sglen;
> +	unsigned int			dma_cycles;
> +	struct virt_dma_desc		vd;
> +	uint8_t				es;
> +	struct moxart_sg		sg[0];
> +};
> +
> +struct moxart_chan {
> +	struct virt_dma_chan		vc;
> +
> +	void __iomem			*base;
> +	struct moxart_desc		*desc;
> +
> +	struct dma_slave_config		cfg;
> +
> +	bool				allocated;
> +	bool				error;
> +	int				ch_num;
> +	unsigned int			line_reqno;
> +	unsigned int			sgidx;
> +};
> +
> +struct moxart_dmadev {
> +	struct dma_device		dma_slave;
> +	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_filter_data {
> +	struct moxart_dmadev		*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static const unsigned int es_bytes[] = {
> +	[MOXART_DMA_DATA_TYPE_S8] = 1,
> +	[MOXART_DMA_DATA_TYPE_S16] = 2,
> +	[MOXART_DMA_DATA_TYPE_S32] = 4,
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_chan, vc.chan);
> +}
> +
> +static inline struct moxart_desc *to_moxart_dma_desc(
> +	struct dma_async_tx_descriptor *t)
> +{
> +	return container_of(t, struct moxart_desc, vd.tx);
> +}
> +
> +static void moxart_dma_desc_free(struct virt_dma_desc *vd)
> +{
> +	kfree(container_of(vd, struct moxart_desc, vd));
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +
> +	if (ch->desc)
> +		ch->desc = NULL;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	vchan_get_all_descriptors(&ch->vc, &head);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +	vchan_dma_desc_free_list(&ch->vc, &head);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	u32 ctrl;
> +
> +	ch->cfg = *cfg;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (ch->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +
> +	switch (cmd) {
> +	case DMA_PAUSE:
> +	case DMA_RESUME:
> +		return -EINVAL;
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
> +	struct dma_chan *chan, struct scatterlist *sgl,
> +	unsigned int sg_len, enum dma_transfer_direction dir,
> +	unsigned long tx_flags, void *context)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_desc *d;
> +	enum dma_slave_buswidth dev_width;
> +	dma_addr_t dev_addr;
> +	struct scatterlist *sgent;
> +	unsigned int es;
> +	unsigned int i;
> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (dir == DMA_DEV_TO_MEM) {
> +		dev_addr = ch->cfg.src_addr;
> +		dev_width = ch->cfg.src_addr_width;
> +	} else {
> +		dev_addr = ch->cfg.dst_addr;
> +		dev_width = ch->cfg.dst_addr_width;
> +	}
> +
> +	switch (dev_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		es = MOXART_DMA_DATA_TYPE_S8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S16;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S32;
> +		break;
> +	default:
> +		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
> +			__func__, dev_width);
> +		return NULL;
> +	}
> +
> +	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
> +	if (!d)
> +		return NULL;
> +
> +	d->dma_dir = dir;
> +	d->dev_addr = dev_addr;
> +	d->es = es;
> +
> +	for_each_sg(sgl, sgent, sg_len, i) {
> +		d->sg[i].addr = sg_dma_address(sgent);
> +		d->sg[i].len = sg_dma_len(sgent);
> +	}
> +
> +	d->sglen = sg_len;
> +
> +	ch->error = 0;
> +
> +	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +	struct dma_chan *chan;
> +	struct moxart_chan *ch;
> +
> +	chan = dma_get_any_slave_channel(&mdc->dma_slave);
> +	if (!chan)
> +		return NULL;
> +
> +	ch = to_moxart_dma_chan(chan);
> +	ch->line_reqno = dma_spec->args[0];
> +
> +	return chan;
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	vchan_free_chan_resources(&ch->vc);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 0;
> +}
> +
> +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
> +				  dma_addr_t dst_addr)
> +{
> +	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
> +	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
> +}
> +
> +static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	unsigned int sglen_div = es_bytes[d->es];
> +
> +	d->dma_cycles = len >> sglen_div;
> +
> +	/*
> +	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
> +	 * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ).
> +	 */
> +	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
> +		__func__, d->dma_cycles, len);
> +}
> +
> +static void moxart_start_dma(struct moxart_chan *ch)
> +{
> +	u32 ctrl;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +}
> +
> +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	struct moxart_sg *sg = ch->desc->sg + idx;
> +
> +	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
> +		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
> +	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
> +		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
> +
> +	moxart_set_transfer_params(ch, sg->len);
> +
> +	moxart_start_dma(ch);
> +}
> +
> +static void moxart_dma_start_desc(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +
> +	vd = vchan_next_desc(&ch->vc);
> +
> +	if (!vd) {
> +		ch->desc = NULL;
> +		return;
> +	}
> +
> +	list_del(&vd->node);
> +
> +	ch->desc = to_moxart_dma_desc(&vd->tx);
> +	ch->sgidx = 0;
> +
> +	moxart_dma_start_sg(ch, 0);
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	if (vchan_issue_pending(&ch->vc) && !ch->desc)
> +		moxart_dma_start_desc(chan);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +}
> +
> +static size_t moxart_dma_desc_size(struct moxart_desc *d,
> +				   unsigned int completed_sgs)
> +{
> +	unsigned int i;
> +	size_t size;
> +
> +	for (size = i = completed_sgs; i < d->sglen; i++)
> +		size += d->sg[i].len;
> +
> +	return size;
> +}
> +
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles;
> +
> +	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
> +
> +	return size;
> +}
> +
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txstate)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +	struct moxart_desc *d;
> +	enum dma_status ret;
> +	unsigned long flags;
> +
> +	/*
> +	 * dma_cookie_status() assigns initial residue value.
> +	 */
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	vd = vchan_find_desc(&ch->vc, cookie);
> +	if (vd) {
> +		d = to_moxart_dma_desc(&vd->tx);
> +		txstate->residue = moxart_dma_desc_size(d, 0);
> +	} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
> +		txstate->residue = moxart_dma_desc_size_in_flight(ch);
> +	}
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +
> +	if (ch->error)
> +		return DMA_ERROR;
> +
> +	return ret;
> +}
> +
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->device_prep_slave_sg		= moxart_prep_slave_sg;
> +	dma->device_alloc_chan_resources	= moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources		= moxart_free_chan_resources;
> +	dma->device_issue_pending		= moxart_issue_pending;
> +	dma->device_tx_status			= moxart_tx_status;
> +	dma->device_control			= moxart_control;
> +	dma->dev				= dev;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dmadev *mc = devid;
> +	struct moxart_chan *ch = &mc->slave_chans[0];
> +	unsigned int i;
> +	unsigned long flags;
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (!ch->allocated)
> +			continue;
> +
> +		ctrl = readl(ch->base + REG_OFF_CTRL);
> +
> +		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
> +			__func__, ch, ch->base, ctrl);
> +
> +		if (ctrl & APB_DMA_FIN_INT_STS) {
> +			ctrl &= ~APB_DMA_FIN_INT_STS;
> +			if (ch->desc) {
> +				spin_lock_irqsave(&ch->vc.lock, flags);
> +				if (++ch->sgidx < ch->desc->sglen) {
> +					moxart_dma_start_sg(ch, ch->sgidx);
> +				} else {
> +					vchan_cookie_complete(&ch->desc->vd);
> +					moxart_dma_start_desc(&ch->vc.chan);
> +				}
> +				spin_unlock_irqrestore(&ch->vc.lock, flags);
> +			}
> +		}
> +
> +		if (ctrl & APB_DMA_ERR_INT_STS) {
> +			ctrl &= ~APB_DMA_ERR_INT_STS;
> +			ch->error = 1;
> +		}
> +
> +		writel(ctrl, ch->base + REG_OFF_CTRL);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct device_node *node = dev->of_node;
> +	struct resource *res;
> +	static void __iomem *dma_base_addr;
> +	int ret, i;
> +	unsigned int irq;
> +	struct moxart_chan *ch;
> +	struct moxart_dmadev *mdc;
> +
> +	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +	if (!mdc) {
> +		dev_err(dev, "can't allocate DMA container\n");
> +		return -ENOMEM;
> +	}
> +
> +	irq = irq_of_parse_and_map(node, 0);
> +	if (irq == NO_IRQ) {
> +		dev_err(dev, "no IRQ resource\n");
> +		return -EINVAL;
> +	}
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dma_base_addr = devm_ioremap_resource(dev, res);
> +	if (IS_ERR(dma_base_addr))
> +		return PTR_ERR(dma_base_addr);
> +
> +	dma_cap_zero(mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
> +
> +	moxart_dma_init(&mdc->dma_slave, dev);
> +
> +	ch = &mdc->slave_chans[0];
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		ch->ch_num = i;
> +		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
> +		ch->allocated = 0;
> +
> +		ch->vc.desc_free = moxart_dma_desc_free;
> +		vchan_init(&ch->vc, &mdc->dma_slave);
> +
> +		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
> +			__func__, i, ch->ch_num, ch->base);
> +	}
> +
> +	platform_set_drvdata(pdev, mdc);
> +
> +	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +			       "moxart-dma-engine", mdc);
> +	if (ret) {
> +		dev_err(dev, "devm_request_irq failed\n");
> +		return ret;
> +	}
> +
> +	ret = dma_async_device_register(&mdc->dma_slave);
> +	if (ret) {
> +		dev_err(dev, "dma_async_device_register failed\n");
> +		return ret;
> +	}
> +
> +	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +	if (ret) {
> +		dev_err(dev, "of_dma_controller_register failed\n");
> +		dma_async_device_unregister(&mdc->dma_slave);
> +		return ret;
> +	}
> +
> +	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +	return 0;
> +}
> +
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +	struct moxart_dmadev *m = platform_get_drvdata(pdev);
> +
> +	dma_async_device_unregister(&m->dma_slave);
> +
> +	if (pdev->dev.of_node)
> +		of_dma_controller_free(pdev->dev.of_node);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id moxart_dma_match[] = {
> +	{ .compatible = "moxa,moxart-dma" },
> +	{ }
> +};
> +
> +static struct platform_driver moxart_driver = {
> +	.probe	= moxart_probe,
> +	.remove	= moxart_remove,
> +	.driver = {
> +		.name		= "moxart-dma-engine",
> +		.owner		= THIS_MODULE,
> +		.of_match_table	= moxart_dma_match,
> +	},
> +};
> +
> +static int moxart_init(void)
> +{
> +	return platform_driver_register(&moxart_driver);
> +}
> +subsys_initcall(moxart_init);
> +
> +static void __exit moxart_exit(void)
> +{
> +	platform_driver_unregister(&moxart_driver);
> +}
> +module_exit(moxart_exit);
> +
> +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
> +MODULE_DESCRIPTION("MOXART DMA engine driver");
> +MODULE_LICENSE("GPL v2");
> -- 
> 1.8.2.1
> 

-- 

^ permalink raw reply	[flat|nested] 80+ messages in thread

* [PATCH v16] dmaengine: Add MOXA ART DMA engine driver
@ 2014-01-20  7:07                                 ` Vinod Koul
  0 siblings, 0 replies; 80+ messages in thread
From: Vinod Koul @ 2014-01-20  7:07 UTC (permalink / raw)
  To: linux-arm-kernel

On Fri, Jan 17, 2014 at 09:46:05AM +0100, Jonas Jensen wrote:
> The MOXA ART SoC has a DMA controller capable of offloading expensive
> memory operations, such as large copies. This patch adds support for
> the controller including four channels. Two of these are used to
> handle MMC copy on the UC-7112-LX hardware. The remaining two can be
> used in a future audio driver or client application.
> 
> Signed-off-by: Jonas Jensen <jonas.jensen@gmail.com>

Applied thanks.

Though I wasn't able to find ARCH_MOXART in my next, it resolved after using
the linux-next as test tree and was able to compile test

--
~Vinod
> ---
> 
> Notes:
>     Changes since v15:
>     1. rebase drivers/dma/Kconfig to next-20140117
>     
>     Applies to next-20140117
> 
>  .../devicetree/bindings/dma/moxa,moxart-dma.txt    |  45 ++
>  drivers/dma/Kconfig                                |   8 +
>  drivers/dma/Makefile                               |   1 +
>  drivers/dma/moxart-dma.c                           | 699 +++++++++++++++++++++
>  4 files changed, 753 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
>  create mode 100644 drivers/dma/moxart-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> new file mode 100644
> index 0000000..8a9f355
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
> @@ -0,0 +1,45 @@
> +MOXA ART DMA Controller
> +
> +See dma.txt first
> +
> +Required properties:
> +
> +- compatible :	Must be "moxa,moxart-dma"
> +- reg :		Should contain registers location and length
> +- interrupts :	Should contain an interrupt-specifier for the sole
> +		interrupt generated by the device
> +- #dma-cells :	Should be 1, a single cell holding a line request number
> +
> +Example:
> +
> +	dma: dma@90500000 {
> +		compatible = "moxa,moxart-dma";
> +		reg = <0x90500080 0x40>;
> +		interrupts = <24 0>;
> +		#dma-cells = <1>;
> +	};
> +
> +
> +Clients:
> +
> +DMA clients connected to the MOXA ART DMA controller must use the format
> +described in the dma.txt file, using a two-cell specifier for each channel:
> +a phandle plus one integer cell.
> +The two cells in order are:
> +
> +1. A phandle pointing to the DMA controller.
> +2. Peripheral identifier for the hardware handshaking interface.
> +
> +Example:
> +Use the specific request line passed from the DMA controller.
> +For example, the MMC request line is 5:
> +
> +	sdhci: sdhci@98e00000 {
> +		compatible = "moxa,moxart-sdhci";
> +		reg = <0x98e00000 0x5C>;
> +		interrupts = <5 0>;
> +		clocks = <&clk_apb>;
> +		dmas =  <&dma 5>,
> +			<&dma 5>;
> +		dma-names = "tx", "rx";
> +	};
> diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
> index 9ae6f54..9bed1a2 100644
> --- a/drivers/dma/Kconfig
> +++ b/drivers/dma/Kconfig
> @@ -342,6 +342,14 @@ config K3_DMA
>  	  Support the DMA engine for Hisilicon K3 platform
>  	  devices.
>  
> +config MOXART_DMA
> +	tristate "MOXART DMA support"
> +	depends on ARCH_MOXART
> +	select DMA_ENGINE
> +	select DMA_VIRTUAL_CHANNELS
> +	help
> +	  Enable support for the MOXA ART SoC DMA controller.
> +
>  config DMA_ENGINE
>  	bool
>  
> diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
> index 0a6f08e..a029d0f4 100644
> --- a/drivers/dma/Makefile
> +++ b/drivers/dma/Makefile
> @@ -43,3 +43,4 @@ obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
>  obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
>  obj-$(CONFIG_TI_CPPI41) += cppi41.o
>  obj-$(CONFIG_K3_DMA) += k3dma.o
> +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
> diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
> new file mode 100644
> index 0000000..3258e48
> --- /dev/null
> +++ b/drivers/dma/moxart-dma.c
> @@ -0,0 +1,699 @@
> +/*
> + * MOXA ART SoCs DMA Engine support.
> + *
> + * Copyright (C) 2013 Jonas Jensen
> + *
> + * Jonas Jensen <jonas.jensen@gmail.com>
> + *
> + * This file is licensed under the terms of the GNU General Public
> + * License version 2.  This program is licensed "as is" without any
> + * warranty of any kind, whether express or implied.
> + */
> +
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_dma.h>
> +#include <linux/bitops.h>
> +
> +#include <asm/cacheflush.h>
> +
> +#include "dmaengine.h"
> +#include "virt-dma.h"
> +
> +#define APB_DMA_MAX_CHANNEL			4
> +
> +#define REG_OFF_ADDRESS_SOURCE			0
> +#define REG_OFF_ADDRESS_DEST			4
> +#define REG_OFF_CYCLES				8
> +#define REG_OFF_CTRL				12
> +#define REG_OFF_CHAN_SIZE			16
> +
> +#define APB_DMA_ENABLE				BIT(0)
> +#define APB_DMA_FIN_INT_STS			BIT(1)
> +#define APB_DMA_FIN_INT_EN			BIT(2)
> +#define APB_DMA_BURST_MODE			BIT(3)
> +#define APB_DMA_ERR_INT_STS			BIT(4)
> +#define APB_DMA_ERR_INT_EN			BIT(5)
> +
> +/*
> + * Unset: APB
> + * Set:   AHB
> + */
> +#define APB_DMA_SOURCE_SELECT			0x40
> +#define APB_DMA_DEST_SELECT			0x80
> +
> +#define APB_DMA_SOURCE				0x100
> +#define APB_DMA_DEST				0x1000
> +
> +#define APB_DMA_SOURCE_MASK			0x700
> +#define APB_DMA_DEST_MASK			0x7000
> +
> +/*
> + * 000: No increment
> + * 001: +1 (Burst=0), +4  (Burst=1)
> + * 010: +2 (Burst=0), +8  (Burst=1)
> + * 011: +4 (Burst=0), +16 (Burst=1)
> + * 101: -1 (Burst=0), -4  (Burst=1)
> + * 110: -2 (Burst=0), -8  (Burst=1)
> + * 111: -4 (Burst=0), -16 (Burst=1)
> + */
> +#define APB_DMA_SOURCE_INC_0			0
> +#define APB_DMA_SOURCE_INC_1_4			0x100
> +#define APB_DMA_SOURCE_INC_2_8			0x200
> +#define APB_DMA_SOURCE_INC_4_16			0x300
> +#define APB_DMA_SOURCE_DEC_1_4			0x500
> +#define APB_DMA_SOURCE_DEC_2_8			0x600
> +#define APB_DMA_SOURCE_DEC_4_16			0x700
> +#define APB_DMA_DEST_INC_0			0
> +#define APB_DMA_DEST_INC_1_4			0x1000
> +#define APB_DMA_DEST_INC_2_8			0x2000
> +#define APB_DMA_DEST_INC_4_16			0x3000
> +#define APB_DMA_DEST_DEC_1_4			0x5000
> +#define APB_DMA_DEST_DEC_2_8			0x6000
> +#define APB_DMA_DEST_DEC_4_16			0x7000
> +
> +/*
> + * Request signal select source/destination address for DMA hardware handshake.
> + *
> + * The request line number is a property of the DMA controller itself,
> + * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
> + *
> + * 0:    No request / Grant signal
> + * 1-15: Request    / Grant signal
> + */
> +#define APB_DMA_SOURCE_REQ_NO			0x1000000
> +#define APB_DMA_SOURCE_REQ_NO_MASK		0xf000000
> +#define APB_DMA_DEST_REQ_NO			0x10000
> +#define APB_DMA_DEST_REQ_NO_MASK		0xf0000
> +
> +#define APB_DMA_DATA_WIDTH			0x100000
> +#define APB_DMA_DATA_WIDTH_MASK			0x300000
> +/*
> + * Data width of transfer:
> + *
> + * 00: Word
> + * 01: Half
> + * 10: Byte
> + */
> +#define APB_DMA_DATA_WIDTH_4			0
> +#define APB_DMA_DATA_WIDTH_2			0x100000
> +#define APB_DMA_DATA_WIDTH_1			0x200000
> +
> +#define APB_DMA_CYCLES_MASK			0x00ffffff
> +
> +#define MOXART_DMA_DATA_TYPE_S8			0x00
> +#define MOXART_DMA_DATA_TYPE_S16		0x01
> +#define MOXART_DMA_DATA_TYPE_S32		0x02
> +
> +struct moxart_sg {
> +	dma_addr_t addr;
> +	uint32_t len;
> +};
> +
> +struct moxart_desc {
> +	enum dma_transfer_direction	dma_dir;
> +	dma_addr_t			dev_addr;
> +	unsigned int			sglen;
> +	unsigned int			dma_cycles;
> +	struct virt_dma_desc		vd;
> +	uint8_t				es;
> +	struct moxart_sg		sg[0];
> +};
> +
> +struct moxart_chan {
> +	struct virt_dma_chan		vc;
> +
> +	void __iomem			*base;
> +	struct moxart_desc		*desc;
> +
> +	struct dma_slave_config		cfg;
> +
> +	bool				allocated;
> +	bool				error;
> +	int				ch_num;
> +	unsigned int			line_reqno;
> +	unsigned int			sgidx;
> +};
> +
> +struct moxart_dmadev {
> +	struct dma_device		dma_slave;
> +	struct moxart_chan		slave_chans[APB_DMA_MAX_CHANNEL];
> +};
> +
> +struct moxart_filter_data {
> +	struct moxart_dmadev		*mdc;
> +	struct of_phandle_args		*dma_spec;
> +};
> +
> +static const unsigned int es_bytes[] = {
> +	[MOXART_DMA_DATA_TYPE_S8] = 1,
> +	[MOXART_DMA_DATA_TYPE_S16] = 2,
> +	[MOXART_DMA_DATA_TYPE_S32] = 4,
> +};
> +
> +static struct device *chan2dev(struct dma_chan *chan)
> +{
> +	return &chan->dev->device;
> +}
> +
> +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
> +{
> +	return container_of(c, struct moxart_chan, vc.chan);
> +}
> +
> +static inline struct moxart_desc *to_moxart_dma_desc(
> +	struct dma_async_tx_descriptor *t)
> +{
> +	return container_of(t, struct moxart_desc, vd.tx);
> +}
> +
> +static void moxart_dma_desc_free(struct virt_dma_desc *vd)
> +{
> +	kfree(container_of(vd, struct moxart_desc, vd));
> +}
> +
> +static int moxart_terminate_all(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +	LIST_HEAD(head);
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +
> +	if (ch->desc)
> +		ch->desc = NULL;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	vchan_get_all_descriptors(&ch->vc, &head);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +	vchan_dma_desc_free_list(&ch->vc, &head);
> +
> +	return 0;
> +}
> +
> +static int moxart_slave_config(struct dma_chan *chan,
> +			       struct dma_slave_config *cfg)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	u32 ctrl;
> +
> +	ch->cfg = *cfg;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= APB_DMA_BURST_MODE;
> +	ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
> +	ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
> +
> +	switch (ch->cfg.src_addr_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		ctrl |= APB_DMA_DATA_WIDTH_1;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_1_4;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_1_4;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		ctrl |= APB_DMA_DATA_WIDTH_2;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_2_8;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_2_8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		ctrl &= ~APB_DMA_DATA_WIDTH;
> +		if (ch->cfg.direction != DMA_MEM_TO_DEV)
> +			ctrl |= APB_DMA_DEST_INC_4_16;
> +		else
> +			ctrl |= APB_DMA_SOURCE_INC_4_16;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (ch->cfg.direction == DMA_MEM_TO_DEV) {
> +		ctrl &= ~APB_DMA_DEST_SELECT;
> +		ctrl |= APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 16 &
> +			 APB_DMA_DEST_REQ_NO_MASK);
> +	} else {
> +		ctrl |= APB_DMA_DEST_SELECT;
> +		ctrl &= ~APB_DMA_SOURCE_SELECT;
> +		ctrl |= (ch->line_reqno << 24 &
> +			 APB_DMA_SOURCE_REQ_NO_MASK);
> +	}
> +
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +
> +	return 0;
> +}
> +
> +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +			  unsigned long arg)
> +{
> +	int ret = 0;
> +
> +	switch (cmd) {
> +	case DMA_PAUSE:
> +	case DMA_RESUME:
> +		return -EINVAL;
> +	case DMA_TERMINATE_ALL:
> +		moxart_terminate_all(chan);
> +		break;
> +	case DMA_SLAVE_CONFIG:
> +		ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
> +		break;
> +	default:
> +		ret = -ENOSYS;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
> +	struct dma_chan *chan, struct scatterlist *sgl,
> +	unsigned int sg_len, enum dma_transfer_direction dir,
> +	unsigned long tx_flags, void *context)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct moxart_desc *d;
> +	enum dma_slave_buswidth dev_width;
> +	dma_addr_t dev_addr;
> +	struct scatterlist *sgent;
> +	unsigned int es;
> +	unsigned int i;
> +
> +	if (!is_slave_direction(dir)) {
> +		dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (dir == DMA_DEV_TO_MEM) {
> +		dev_addr = ch->cfg.src_addr;
> +		dev_width = ch->cfg.src_addr_width;
> +	} else {
> +		dev_addr = ch->cfg.dst_addr;
> +		dev_width = ch->cfg.dst_addr_width;
> +	}
> +
> +	switch (dev_width) {
> +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> +		es = MOXART_DMA_DATA_TYPE_S8;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S16;
> +		break;
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		es = MOXART_DMA_DATA_TYPE_S32;
> +		break;
> +	default:
> +		dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
> +			__func__, dev_width);
> +		return NULL;
> +	}
> +
> +	d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
> +	if (!d)
> +		return NULL;
> +
> +	d->dma_dir = dir;
> +	d->dev_addr = dev_addr;
> +	d->es = es;
> +
> +	for_each_sg(sgl, sgent, sg_len, i) {
> +		d->sg[i].addr = sg_dma_address(sgent);
> +		d->sg[i].len = sg_dma_len(sgent);
> +	}
> +
> +	d->sglen = sg_len;
> +
> +	ch->error = 0;
> +
> +	return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
> +}
> +
> +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
> +					struct of_dma *ofdma)
> +{
> +	struct moxart_dmadev *mdc = ofdma->of_dma_data;
> +	struct dma_chan *chan;
> +	struct moxart_chan *ch;
> +
> +	chan = dma_get_any_slave_channel(&mdc->dma_slave);
> +	if (!chan)
> +		return NULL;
> +
> +	ch = to_moxart_dma_chan(chan);
> +	ch->line_reqno = dma_spec->args[0];
> +
> +	return chan;
> +}
> +
> +static int moxart_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 1;
> +
> +	return 0;
> +}
> +
> +static void moxart_free_chan_resources(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +
> +	vchan_free_chan_resources(&ch->vc);
> +
> +	dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
> +		__func__, ch->ch_num);
> +	ch->allocated = 0;
> +}
> +
> +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
> +				  dma_addr_t dst_addr)
> +{
> +	writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
> +	writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
> +}
> +
> +static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	unsigned int sglen_div = es_bytes[d->es];
> +
> +	d->dma_cycles = len >> sglen_div;
> +
> +	/*
> +	 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
> +	 * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ).
> +	 */
> +	writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
> +		__func__, d->dma_cycles, len);
> +}
> +
> +static void moxart_start_dma(struct moxart_chan *ch)
> +{
> +	u32 ctrl;
> +
> +	ctrl = readl(ch->base + REG_OFF_CTRL);
> +	ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
> +	writel(ctrl, ch->base + REG_OFF_CTRL);
> +}
> +
> +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
> +{
> +	struct moxart_desc *d = ch->desc;
> +	struct moxart_sg *sg = ch->desc->sg + idx;
> +
> +	if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
> +		moxart_dma_set_params(ch, sg->addr, d->dev_addr);
> +	else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
> +		moxart_dma_set_params(ch, d->dev_addr, sg->addr);
> +
> +	moxart_set_transfer_params(ch, sg->len);
> +
> +	moxart_start_dma(ch);
> +}
> +
> +static void moxart_dma_start_desc(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +
> +	vd = vchan_next_desc(&ch->vc);
> +
> +	if (!vd) {
> +		ch->desc = NULL;
> +		return;
> +	}
> +
> +	list_del(&vd->node);
> +
> +	ch->desc = to_moxart_dma_desc(&vd->tx);
> +	ch->sgidx = 0;
> +
> +	moxart_dma_start_sg(ch, 0);
> +}
> +
> +static void moxart_issue_pending(struct dma_chan *chan)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&ch->vc.lock, flags);
> +	if (vchan_issue_pending(&ch->vc) && !ch->desc)
> +		moxart_dma_start_desc(chan);
> +	spin_unlock_irqrestore(&ch->vc.lock, flags);
> +}
> +
> +/*
> + * Return the number of bytes still to transfer in descriptor @d once
> + * the first @completed_sgs scatterlist entries are done, i.e. the sum
> + * of the lengths of the remaining entries.
> + */
> +static size_t moxart_dma_desc_size(struct moxart_desc *d,
> +				   unsigned int completed_sgs)
> +{
> +	unsigned int i;
> +	size_t size = 0;
> +
> +	/*
> +	 * Fix: the previous "size = i = completed_sgs" folded the sg
> +	 * *index* into the byte count, inflating the reported residue
> +	 * by completed_sgs bytes.  The residue is only the sum of the
> +	 * remaining sg lengths.
> +	 */
> +	for (i = completed_sgs; i < d->sglen; i++)
> +		size += d->sg[i].len;
> +
> +	return size;
> +}
> +
> +/*
> + * Residue for the descriptor currently active on @ch: the bytes of
> + * the not-yet-started sg entries plus the untransferred part of the
> + * in-flight entry, derived from the hardware CYCLES register.
> + *
> + * NOTE(review): this assumes CYCLES counts down from dma_cycles as
> + * data moves, one cycle per 2^es bytes — consistent with the
> + * completed = dma_cycles - cycles arithmetic below, but confirm
> + * against the APB DMA datasheet.
> + *
> + * Caller must hold ch->vc.lock and ch->desc must be non-NULL.
> + */
> +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
> +{
> +	size_t size;
> +	unsigned int completed_cycles, cycles;
> +
> +	size = moxart_dma_desc_size(ch->desc, ch->sgidx);
> +	cycles = readl(ch->base + REG_OFF_CYCLES);
> +	completed_cycles = (ch->desc->dma_cycles - cycles);
> +	size -= completed_cycles << es_bytes[ch->desc->es];
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
> +
> +	return size;
> +}
> +
> +/*
> + * dmaengine device_tx_status callback.
> + *
> + * Reports the cookie's completion state and, when the caller supplied
> + * a dma_tx_state, fills in the residue: full descriptor size for a
> + * descriptor still queued on the virt-dma lists, hardware-derived
> + * in-flight size for the active descriptor.
> + *
> + * Returns DMA_ERROR if the channel has latched an error interrupt.
> + */
> +static enum dma_status moxart_tx_status(struct dma_chan *chan,
> +					dma_cookie_t cookie,
> +					struct dma_tx_state *txstate)
> +{
> +	struct moxart_chan *ch = to_moxart_dma_chan(chan);
> +	struct virt_dma_desc *vd;
> +	struct moxart_desc *d;
> +	enum dma_status ret;
> +	unsigned long flags;
> +
> +	/*
> +	 * dma_cookie_status() assigns the initial residue value and
> +	 * tolerates a NULL txstate; the dmaengine API allows callers to
> +	 * pass NULL, so guard our own residue writes accordingly (the
> +	 * previous version dereferenced txstate unconditionally).
> +	 */
> +	ret = dma_cookie_status(chan, cookie, txstate);
> +
> +	if (txstate) {
> +		spin_lock_irqsave(&ch->vc.lock, flags);
> +		vd = vchan_find_desc(&ch->vc, cookie);
> +		if (vd) {
> +			d = to_moxart_dma_desc(&vd->tx);
> +			txstate->residue = moxart_dma_desc_size(d, 0);
> +		} else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
> +			txstate->residue = moxart_dma_desc_size_in_flight(ch);
> +		}
> +		spin_unlock_irqrestore(&ch->vc.lock, flags);
> +	}
> +
> +	if (ch->error)
> +		return DMA_ERROR;
> +
> +	return ret;
> +}
> +
> +/*
> + * Fill in the dmaengine callbacks and owner device for @dma and
> + * initialise its (empty) channel list.
> + */
> +static void moxart_dma_init(struct dma_device *dma, struct device *dev)
> +{
> +	dma->dev = dev;
> +	dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
> +	dma->device_free_chan_resources = moxart_free_chan_resources;
> +	dma->device_prep_slave_sg = moxart_prep_slave_sg;
> +	dma->device_issue_pending = moxart_issue_pending;
> +	dma->device_tx_status = moxart_tx_status;
> +	dma->device_control = moxart_control;
> +
> +	INIT_LIST_HEAD(&dma->channels);
> +}
> +
> +/*
> + * Interrupt handler shared by all APB DMA channels: scan every
> + * allocated channel, acknowledge completion/error status, and either
> + * advance to the next sg entry or complete the descriptor and start
> + * the next one.
> + *
> + * NOTE(review): status bits are cleared in the CTRL copy and written
> + * back at the end of each iteration — presumably the hardware treats
> + * writing 0 to a status bit as the acknowledge; confirm against the
> + * datasheet.
> + */
> +static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
> +{
> +	struct moxart_dmadev *mc = devid;
> +	struct moxart_chan *ch = &mc->slave_chans[0];
> +	unsigned int i;
> +	unsigned long flags;
> +	u32 ctrl;
> +
> +	dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
> +
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		if (!ch->allocated)
> +			continue;
> +
> +		ctrl = readl(ch->base + REG_OFF_CTRL);
> +
> +		dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
> +			__func__, ch, ch->base, ctrl);
> +
> +		if (ctrl & APB_DMA_FIN_INT_STS) {
> +			ctrl &= ~APB_DMA_FIN_INT_STS;
> +			if (ch->desc) {
> +				spin_lock_irqsave(&ch->vc.lock, flags);
> +				/* More sg entries left in this descriptor? */
> +				if (++ch->sgidx < ch->desc->sglen) {
> +					moxart_dma_start_sg(ch, ch->sgidx);
> +				} else {
> +					/* Descriptor done: complete it, chain next. */
> +					vchan_cookie_complete(&ch->desc->vd);
> +					moxart_dma_start_desc(&ch->vc.chan);
> +				}
> +				spin_unlock_irqrestore(&ch->vc.lock, flags);
> +			}
> +		}
> +
> +		if (ctrl & APB_DMA_ERR_INT_STS) {
> +			ctrl &= ~APB_DMA_ERR_INT_STS;
> +			/* Latched error; reported via moxart_tx_status(). */
> +			ch->error = 1;
> +		}
> +
> +		writel(ctrl, ch->base + REG_OFF_CTRL);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +/*
> + * Probe: map the controller registers, initialise the dma_device and
> + * its APB_DMA_MAX_CHANNEL virtual channels, request the shared IRQ,
> + * and register with both the dmaengine core and the OF DMA helpers.
> + */
> +static int moxart_probe(struct platform_device *pdev)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct device_node *node = dev->of_node;
> +	struct resource *res;
> +	/*
> +	 * Fix: was "static void __iomem *dma_base_addr" — a static
> +	 * local persists across probe calls and would be shared (and
> +	 * clobbered) between device instances.
> +	 */
> +	void __iomem *dma_base_addr;
> +	int ret, i;
> +	unsigned int irq;
> +	struct moxart_chan *ch;
> +	struct moxart_dmadev *mdc;
> +
> +	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
> +	if (!mdc) {
> +		dev_err(dev, "can't allocate DMA container\n");
> +		return -ENOMEM;
> +	}
> +
> +	/*
> +	 * Fix: irq_of_parse_and_map() returns 0 on failure; comparing
> +	 * against NO_IRQ is wrong on architectures where NO_IRQ != 0.
> +	 */
> +	irq = irq_of_parse_and_map(node, 0);
> +	if (!irq) {
> +		dev_err(dev, "no IRQ resource\n");
> +		return -EINVAL;
> +	}
> +
> +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +	dma_base_addr = devm_ioremap_resource(dev, res);
> +	if (IS_ERR(dma_base_addr))
> +		return PTR_ERR(dma_base_addr);
> +
> +	dma_cap_zero(mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
> +	dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
> +
> +	moxart_dma_init(&mdc->dma_slave, dev);
> +
> +	ch = &mdc->slave_chans[0];
> +	for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
> +		ch->ch_num = i;
> +		ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
> +		ch->allocated = 0;
> +
> +		ch->vc.desc_free = moxart_dma_desc_free;
> +		vchan_init(&ch->vc, &mdc->dma_slave);
> +
> +		dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
> +			__func__, i, ch->ch_num, ch->base);
> +	}
> +
> +	platform_set_drvdata(pdev, mdc);
> +
> +	ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
> +			       "moxart-dma-engine", mdc);
> +	if (ret) {
> +		dev_err(dev, "devm_request_irq failed\n");
> +		return ret;
> +	}
> +
> +	ret = dma_async_device_register(&mdc->dma_slave);
> +	if (ret) {
> +		dev_err(dev, "dma_async_device_register failed\n");
> +		return ret;
> +	}
> +
> +	ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
> +	if (ret) {
> +		dev_err(dev, "of_dma_controller_register failed\n");
> +		dma_async_device_unregister(&mdc->dma_slave);
> +		return ret;
> +	}
> +
> +	dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
> +
> +	return 0;
> +}
> +
> +/*
> + * Remove: tear down in reverse order of probe — stop handing out
> + * channels via the OF translation hook first, then unregister the
> + * dma_device (the previous order left a window where moxart_of_xlate
> + * could reference an already-unregistered device).
> + */
> +static int moxart_remove(struct platform_device *pdev)
> +{
> +	struct moxart_dmadev *m = platform_get_drvdata(pdev);
> +
> +	if (pdev->dev.of_node)
> +		of_dma_controller_free(pdev->dev.of_node);
> +
> +	dma_async_device_unregister(&m->dma_slave);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id moxart_dma_match[] = {
> +	{ .compatible = "moxa,moxart-dma" },
> +	{ }	/* sentinel */
> +};
> +/* Export the OF match table so udev/modprobe can autoload the module. */
> +MODULE_DEVICE_TABLE(of, moxart_dma_match);
> +
> +/* Platform driver glue; bound via the OF compatible string above. */
> +static struct platform_driver moxart_driver = {
> +	.probe	= moxart_probe,
> +	.remove	= moxart_remove,
> +	.driver = {
> +		.name		= "moxart-dma-engine",
> +		.owner		= THIS_MODULE,
> +		.of_match_table	= moxart_dma_match,
> +	},
> +};
> +
> +/*
> + * Registered at subsys_initcall time rather than with
> + * module_platform_driver() — presumably so the DMA engine is available
> + * before client drivers probe at device_initcall time; NOTE(review):
> + * confirm clients cannot simply use deferred probing instead.
> + */
> +static int moxart_init(void)
> +{
> +	return platform_driver_register(&moxart_driver);
> +}
> +subsys_initcall(moxart_init);
> +
> +/* Module unload: unregister the platform driver. */
> +static void __exit moxart_exit(void)
> +{
> +	platform_driver_unregister(&moxart_driver);
> +}
> +module_exit(moxart_exit);
> +
> +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
> +MODULE_DESCRIPTION("MOXART DMA engine driver");
> +MODULE_LICENSE("GPL v2");
> -- 
> 1.8.2.1
> 

-- 

^ permalink raw reply	[flat|nested] 80+ messages in thread

end of thread, other threads:[~2014-01-20  8:09 UTC | newest]

Thread overview: 80+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-07-10  8:51 [PATCH] dmaengine: Add MOXA ART DMA engine driver Jonas Jensen
2013-07-10  8:51 ` Jonas Jensen
2013-07-10  9:30 ` Russell King - ARM Linux
2013-07-10  9:30   ` Russell King - ARM Linux
2013-07-10  9:48   ` Jonas Jensen
2013-07-10  9:48     ` Jonas Jensen
2013-07-10 12:43 ` [PATCH v2] " Jonas Jensen
2013-07-10 12:43   ` Jonas Jensen
2013-07-17 10:06   ` [PATCH v3] " Jonas Jensen
2013-07-17 10:06     ` Jonas Jensen
2013-07-29 13:44     ` [PATCH v4] " Jonas Jensen
2013-07-29 13:44       ` Jonas Jensen
2013-07-29 16:35       ` Arnd Bergmann
2013-07-29 16:35         ` Arnd Bergmann
2013-08-02 12:28         ` Jonas Jensen
2013-08-02 12:28           ` Jonas Jensen
2013-08-02 19:28           ` Arnd Bergmann
2013-08-02 19:28             ` Arnd Bergmann
2013-08-02 12:03       ` [PATCH v5] " Jonas Jensen
2013-08-02 12:03         ` Jonas Jensen
2013-08-02 13:28         ` [PATCH v6] " Jonas Jensen
2013-08-02 13:28           ` Jonas Jensen
2013-08-02 13:51           ` Russell King - ARM Linux
2013-08-02 13:51             ` Russell King - ARM Linux
2013-08-02 14:09           ` Mark Rutland
2013-08-02 14:09             ` Mark Rutland
2013-08-05 14:37           ` [PATCH v7] " Jonas Jensen
2013-08-05 14:37             ` Jonas Jensen
2013-08-05 16:57             ` Mark Rutland
2013-08-05 16:57               ` Mark Rutland
2013-08-05 20:49             ` Arnd Bergmann
2013-08-05 20:49               ` Arnd Bergmann
2013-08-06 12:38             ` [PATCH v8] " Jonas Jensen
2013-08-06 12:38               ` Jonas Jensen
2013-08-06 18:42               ` Arnd Bergmann
2013-08-06 18:42                 ` Arnd Bergmann
2013-08-07 15:13               ` Mark Rutland
2013-08-07 15:13                 ` Mark Rutland
2013-10-07 13:42                 ` Jonas Jensen
2013-10-07 13:42                   ` Jonas Jensen
2013-10-07 13:13               ` [PATCH v9] " Jonas Jensen
2013-10-07 13:13                 ` Jonas Jensen
2013-10-07 14:10                 ` [PATCH v10] " Jonas Jensen
2013-10-07 14:10                   ` Jonas Jensen
2013-10-07 15:12                   ` Mark Rutland
2013-10-07 15:12                     ` Mark Rutland
2013-10-07 15:12                     ` Mark Rutland
2013-10-08  9:53                     ` Jonas Jensen
2013-10-08  9:53                       ` Jonas Jensen
2013-10-08  9:53                       ` Jonas Jensen
2013-10-08 12:55                       ` Mark Rutland
2013-10-08 12:55                         ` Mark Rutland
2013-10-08 12:55                         ` Mark Rutland
2013-10-08  8:42                   ` [PATCH v11] " Jonas Jensen
2013-10-08  8:42                     ` Jonas Jensen
2013-11-13 13:59                     ` Vinod Koul
2013-11-13 13:59                       ` Vinod Koul
2013-11-13 17:16                       ` Arnd Bergmann
2013-11-13 17:16                         ` Arnd Bergmann
2013-12-06 14:27                     ` [PATCH v12] " Jonas Jensen
2013-12-06 14:27                       ` Jonas Jensen
2013-12-11 15:13                       ` [PATCH v13] " Jonas Jensen
2013-12-11 15:13                         ` Jonas Jensen
2013-12-11 21:27                         ` Arnd Bergmann
2013-12-11 21:27                           ` Arnd Bergmann
2013-12-12  9:16                         ` Andy Shevchenko
2013-12-12  9:16                           ` Andy Shevchenko
2013-12-12 12:32                         ` [PATCH v14] " Jonas Jensen
2013-12-12 12:32                           ` Jonas Jensen
2013-12-13 16:02                           ` Lars-Peter Clausen
2013-12-13 16:02                             ` Lars-Peter Clausen
2013-12-16 10:24                           ` [PATCH v15] " Jonas Jensen
2013-12-16 10:24                             ` Jonas Jensen
2014-01-17  8:46                             ` [PATCH v16] " Jonas Jensen
2014-01-17  8:46                               ` Jonas Jensen
     [not found]                               ` <1389948365-13999-1-git-send-email-jonas.jensen-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2014-01-17 13:29                                 ` Fwd: " Jonas Jensen
2014-01-17 14:42                               ` Arnd Bergmann
2014-01-17 14:42                                 ` Arnd Bergmann
2014-01-20  7:07                               ` Vinod Koul
2014-01-20  7:07                                 ` Vinod Koul

This is an external index of several public inboxes;
see the mirroring instructions for how to clone and mirror
all data and code used by this external index.