* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-07  5:41 ` Barry Song
  0 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-07  5:41 UTC (permalink / raw)
  To: dan.j.williams, vinod.koul, arnd
  Cc: workgroup.linux, linux-arm-kernel, linux-kernel, Rongjun Ying,
	Barry Song

From: Rongjun Ying <rongjun.ying@csr.com>

Signed-off-by: Rongjun Ying <rongjun.ying@csr.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
---
 MAINTAINERS                 |    1 +
 drivers/dma/Kconfig         |    7 +
 drivers/dma/Makefile        |    1 +
 drivers/dma/sirf-dma.c      |  590 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/sirfsoc_dma.h |   18 ++
 5 files changed, 617 insertions(+), 0 deletions(-)
 create mode 100644 drivers/dma/sirf-dma.c
 create mode 100644 include/linux/sirfsoc_dma.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 28f65c2..c1237ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -739,6 +739,7 @@ M:	Barry Song <baohua.song@csr.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-prima2/
+F:	drivers/dma/sirf-dma*
 
 ARM/EBSA110 MACHINE SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e3b3d3..1341bcd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -187,6 +187,13 @@ config TIMB_DMA
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
+config SIRF_DMA
+	tristate "CSR SiRFprimaII DMA support"
+	depends on ARCH_PRIMA2
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1..009a222 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 0000000..70b0f09
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,590 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS                 16
+#define SIRFSOC_DMA_CHANNELS                    16
+
+#define SIRFSOC_DMA_CH_ADDR                     0x00
+#define SIRFSOC_DMA_CH_XLEN                     0x04
+#define SIRFSOC_DMA_CH_YLEN                     0x08
+#define SIRFSOC_DMA_CH_CTRL                     0x0C
+
+#define SIRFSOC_DMA_WIDTH_0                     0x100
+#define SIRFSOC_DMA_CH_VALID                    0x140
+#define SIRFSOC_DMA_CH_INT                      0x144
+#define SIRFSOC_DMA_INT_EN                      0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT               4
+#define SIRFSOC_DMA_DIR_CTRL_BIT                5
+
+struct sirfsoc_dma_desc {
+	struct dma_async_tx_descriptor	desc;
+	struct list_head		node;
+};
+
+struct sirfsoc_dma_chan {
+	struct dma_chan			chan;
+	struct list_head		free;
+	struct list_head		prepared;
+	struct list_head		queued;
+	struct list_head		active;
+	struct list_head		completed;
+	dma_cookie_t			completed_cookie;
+
+	/* Lock for this structure */
+	spinlock_t			lock;
+
+	/* SiRFprimaII 2D-DMA parameters */
+	int             xlen;           /* DMA xlen */
+	int             ylen;           /* DMA ylen */
+	int             width;          /* DMA width */
+
+	int             direction;
+	int             mode;
+	u32             addr;
+};
+
+struct sirfsoc_dma {
+	struct dma_device		dma;
+	struct tasklet_struct		tasklet;
+	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
+	void __iomem			*regs;
+	int				irq;
+};
+
+#define DRV_NAME	"sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/*
+ * Execute all queued DMA descriptors.
+ *
+ * Following requirements must be met while calling sirfsoc_dma_execute():
+ * a) schan->lock is acquired,
+ * b) schan->active list is empty,
+ * c) schan->queued list contains at least one entry.
+ */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+	int cid = schan->chan.chan_id;
+
+	/* Move the first queued descriptor to active list */
+	list_move_tail(&schan->queued, &schan->active);
+
+	writel_relaxed(schan->width, sdma->regs + SIRFSOC_DMA_WIDTH_0 + cid * 4);
+	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+		(schan->direction << SIRFSOC_DMA_DIR_CTRL_BIT),
+		sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+	writel_relaxed(schan->xlen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
+	writel_relaxed(schan->ylen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
+	writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) | (1 << cid),
+		sdma->regs + SIRFSOC_DMA_INT_EN);
+	writel_relaxed(schan->addr >> 2, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+	struct sirfsoc_dma *sdma = data;
+	struct sirfsoc_dma_chan *schan;
+	u32 is;
+	int ch;
+
+	is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
+	while ((ch = fls(is) - 1) >= 0) {
+		is &= ~(1 << ch);
+		writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
+		schan = &sdma->channels[ch];
+
+		spin_lock(&schan->lock);
+
+		/* Execute queued descriptors */
+		list_splice_tail_init(&schan->active, &schan->completed);
+		if (!list_empty(&schan->queued))
+			sirfsoc_dma_execute(schan);
+
+		spin_unlock(&schan->lock);
+	}
+
+	/* Schedule tasklet */
+	tasklet_schedule(&sdma->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+	dma_cookie_t last_cookie = 0;
+	struct sirfsoc_dma_chan *schan;
+	struct sirfsoc_dma_desc *mdesc;
+	struct dma_async_tx_descriptor *desc;
+	unsigned long flags;
+	LIST_HEAD(list);
+	int i;
+
+	for (i = 0; i < sdma->dma.chancnt; i++) {
+		schan = &sdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&schan->lock, flags);
+		if (!list_empty(&schan->completed))
+			list_splice_tail_init(&schan->completed, &list);
+		spin_unlock_irqrestore(&schan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&schan->lock, flags);
+		list_splice_tail_init(&list, &schan->free);
+		schan->completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&schan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+	struct sirfsoc_dma *sdma = (void *)data;
+
+	sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+	struct sirfsoc_dma_desc *mdesc;
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	/* Move descriptor to queue */
+	list_move_tail(&mdesc->node, &schan->queued);
+
+	/* If channel is idle, execute all queued descriptors */
+	if (list_empty(&schan->active))
+		sirfsoc_dma_execute(schan);
+
+	/* Update cookie */
+	cookie = schan->chan.cookie + 1;
+	if (cookie <= 0)
+		cookie = 1;
+
+	schan->chan.cookie = cookie;
+	mdesc->desc.cookie = cookie;
+
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+	struct sirfsoc_dma_slave_config *config)
+{
+	u32 addr, direction;
+	unsigned long flags;
+
+	switch (config->generic_config.direction) {
+	case DMA_FROM_DEVICE:
+		direction = 0;
+		addr = config->generic_config.dst_addr;
+		break;
+
+	case DMA_TO_DEVICE:
+		direction = 1;
+		addr = config->generic_config.src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+		(config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+		return -EINVAL;
+
+	spin_lock_irqsave(&schan->lock, flags);
+	schan->addr = addr;
+	schan->direction = direction;
+	schan->xlen = config->xlen;
+	schan->ylen = config->ylen;
+	schan->width = config->width;
+	schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+	int cid = schan->chan.chan_id;
+	unsigned long flags;
+
+	writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
+		sdma->regs + SIRFSOC_DMA_INT_EN);
+	writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	list_splice_tail_init(&schan->queued, &schan->free);
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct sirfsoc_dma_slave_config *config;
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		return sirfsoc_dma_terminate_all(schan);
+	case DMA_SLAVE_CONFIG:
+		config = (struct sirfsoc_dma_slave_config *)arg;
+		return sirfsoc_dma_slave_config(schan, config);
+
+	default:
+		break;
+	}
+
+	return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	struct sirfsoc_dma_desc *mdesc;
+	unsigned long flags;
+	LIST_HEAD(descs);
+	int i;
+
+	/* Alloc descriptors for this channel */
+	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+		mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
+		if (!mdesc) {
+			dev_notice(sdma->dma.dev, "Memory allocation error. "
+				"Allocated only %u descriptors\n", i);
+			break;
+		}
+
+		dma_async_tx_descriptor_init(&mdesc->desc, chan);
+		mdesc->desc.flags = DMA_CTRL_ACK;
+		mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+		list_add_tail(&mdesc->node, &descs);
+	}
+
+	/* Return error only if no descriptors were allocated */
+	if (i == 0)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	list_splice_tail_init(&descs, &schan->free);
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	return 0;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	struct sirfsoc_dma_desc *mdesc, *tmp;
+	unsigned long flags;
+	LIST_HEAD(descs);
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	/* Channel must be idle */
+	BUG_ON(!list_empty(&schan->prepared));
+	BUG_ON(!list_empty(&schan->queued));
+	BUG_ON(!list_empty(&schan->active));
+	BUG_ON(!list_empty(&schan->completed));
+
+	/* Move data */
+	list_splice_tail_init(&schan->free, &descs);
+
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	/* Free descriptors */
+	list_for_each_entry_safe(mdesc, tmp, &descs, node)
+		kfree(mdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&schan->lock, flags);
+
+	if (list_empty(&schan->active) && !list_empty(&schan->queued))
+		sirfsoc_dma_execute(schan);
+
+	spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+	struct dma_tx_state *txstate)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	unsigned long flags;
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+
+	spin_lock_irqsave(&schan->lock, flags);
+	last_used = schan->chan.cookie;
+	last_complete = schan->completed_cookie;
+	spin_unlock_irqrestore(&schan->lock, flags);
+
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* Prepare descriptor for memory to memory copy */
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+	struct sirfsoc_dma_desc *mdesc = NULL;
+	unsigned long iflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&schan->lock, iflags);
+	if (!list_empty(&schan->free)) {
+		mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+			node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&schan->lock, iflags);
+
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		sirfsoc_dma_process_completed(sdma);
+		return NULL;
+	}
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&schan->lock, iflags);
+	list_add_tail(&mdesc->node, &schan->prepared);
+	spin_unlock_irqrestore(&schan->lock, iflags);
+
+	return &mdesc->desc;
+}
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+	unsigned int ch_nr = (unsigned int) chan_id;
+
+	if (ch_nr == chan->chan_id)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct device *dev = &op->dev;
+	struct dma_device *dma;
+	struct sirfsoc_dma *sdma;
+	struct sirfsoc_dma_chan *schan;
+	struct resource res;
+	ulong regs_start, regs_size;
+	u32 id;
+	int retval, i;
+
+	sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
+	if (!sdma) {
+		dev_err(dev, "Memory exhausted!\n");
+		return -ENOMEM;
+	}
+
+	if (of_property_read_u32(dn, "cell-index", &id)) {
+		dev_err(dev, "Fail to get DMAC index\n");
+		return -ENODEV;
+	}
+
+	sdma->irq = irq_of_parse_and_map(dn, 0);
+	if (sdma->irq == NO_IRQ) {
+		dev_err(dev, "Error mapping IRQ!\n");
+		return -EINVAL;
+	}
+
+	retval = of_address_to_resource(dn, 0, &res);
+	if (retval) {
+		dev_err(dev, "Error parsing memory region!\n");
+		return retval;
+	}
+
+	regs_start = res.start;
+	regs_size = resource_size(&res);
+
+	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
+		dev_err(dev, "Error requesting memory region!\n");
+		return -EBUSY;
+	}
+
+	sdma->regs = devm_ioremap(dev, regs_start, regs_size);
+	if (!sdma->regs) {
+		dev_err(dev, "Error mapping memory region!\n");
+		return -ENOMEM;
+	}
+
+	retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+		sdma);
+	if (retval) {
+		dev_err(dev, "Error requesting IRQ!\n");
+		return -EINVAL;
+	}
+
+	dma = &sdma->dma;
+	dma->dev = dev;
+	dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+	dma->device_issue_pending = sirfsoc_dma_issue_pending;
+	dma->device_control = sirfsoc_dma_control;
+	dma->device_tx_status = sirfsoc_dma_tx_status;
+	dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+
+	for (i = 0; i < dma->chancnt; i++) {
+		schan = &sdma->channels[i];
+
+		schan->chan.device = dma;
+		schan->chan.chan_id = dma->chancnt * id + i;
+		schan->chan.cookie = 1;
+		schan->completed_cookie = schan->chan.cookie;
+
+		INIT_LIST_HEAD(&schan->free);
+		INIT_LIST_HEAD(&schan->prepared);
+		INIT_LIST_HEAD(&schan->queued);
+		INIT_LIST_HEAD(&schan->active);
+		INIT_LIST_HEAD(&schan->completed);
+
+		spin_lock_init(&schan->lock);
+		list_add_tail(&schan->chan.device_node, &dma->channels);
+	}
+
+	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+	/* Register DMA engine */
+	dev_set_drvdata(dev, sdma);
+	retval = dma_async_device_register(dma);
+	if (retval) {
+		devm_free_irq(dev, sdma->irq, sdma);
+		irq_dispose_mapping(sdma->irq);
+	}
+
+	return retval;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+	struct device *dev = &op->dev;
+	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+	dma_async_device_unregister(&sdma->dma);
+	devm_free_irq(dev, sdma->irq, sdma);
+	irq_dispose_mapping(sdma->irq);
+
+	return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+	{ .compatible = "sirf,prima2-dmac", },
+	{},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+	.probe		= sirfsoc_dma_probe,
+	.remove		= __devexit_p(sirfsoc_dma_remove),
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table	= sirfsoc_dma_match,
+	},
+};
+
+static int __init sirfsoc_dma_init(void)
+{
+	return platform_driver_register(&sirfsoc_dma_driver);
+}
+module_init(sirfsoc_dma_init);
+
+static void __exit sirfsoc_dma_exit(void)
+{
+	platform_driver_unregister(&sirfsoc_dma_driver);
+}
+module_exit(sirfsoc_dma_exit);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+	"Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
new file mode 100644
index 0000000..75d2d86
--- /dev/null
+++ b/include/linux/sirfsoc_dma.h
@@ -0,0 +1,18 @@
+#ifndef _SIRFSOC_DMA_H_
+#define _SIRFSOC_DMA_H_
+/*
+ * create a custom slave config struct for CSR SiRFprimaII and pass that,
+ * and make dma_slave_config a member of that struct
+ */
+struct sirfsoc_dma_slave_config {
+	struct dma_slave_config generic_config;
+
+	/* CSR SiRFprimaII 2D-DMA config */
+	int             xlen;           /* DMA xlen */
+	int             ylen;           /* DMA ylen */
+	int             width;          /* DMA width */
+};
+
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
+
+#endif
-- 
1.7.1
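
For reference, a minimal client-side sketch (illustrative only, not part of the
patch) of how a peripheral driver might use the exported filter function and the
custom slave config; the channel number, buffer handle and 2D parameters below
are hypothetical placeholders:

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static int example_setup_rx_dma(dma_addr_t buf, struct dma_chan **out)
{
	struct sirfsoc_dma_slave_config cfg = { };
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	/* DMA_MEMCPY is the only capability this version registers */
	dma_cap_set(DMA_MEMCPY, mask);

	/* request the fixed channel wired to this peripheral (e.g. 3) */
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)3);
	if (!chan)
		return -ENODEV;

	cfg.generic_config.direction = DMA_FROM_DEVICE;
	cfg.generic_config.dst_addr = buf;	/* memory side, per the driver */
	cfg.generic_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.generic_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.xlen = 64;				/* 2D parameters; example values */
	cfg.ylen = 16;
	cfg.width = 64;

	*out = chan;
	/* the driver expects the wrapper struct through DMA_SLAVE_CONFIG */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}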





* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07  5:41 ` Barry Song
@ 2011-09-07 16:14   ` Koul, Vinod
  -1 siblings, 0 replies; 84+ messages in thread
From: Koul, Vinod @ 2011-09-07 16:14 UTC (permalink / raw)
  To: Baohua.Song
  Cc: Williams, Dan J, arnd, workgroup.linux, linux-arm-kernel,
	linux-kernel, rongjun.ying


On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
> From: Rongjun Ying <rongjun.ying@csr.com>

> +config SIRF_DMA
> +     tristate "CSR SiRFprimaII DMA support"
> +     depends on ARCH_PRIMA2
> +     select DMA_ENGINE
> +     help
> +       Enable support for the CSR SiRFprimaII DMA engine.
How different is it from the other PrimeCell-based DMA drivers, and why
wouldn't it make sense to use/modify one of them?

> +/*
> + * Execute all queued DMA descriptors.
> + *
> + * Following requirements must be met while calling sirfsoc_dma_execute():
> + * a) schan->lock is acquired,
> + * b) schan->active list is empty,
> + * c) schan->queued list contains at least one entry.
> + */
Please use kernel-doc format...
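For example, roughly (sketch only):

/**
 * sirfsoc_dma_execute - execute all queued DMA descriptors on a channel
 * @schan: SiRFprimaII DMA channel to start
 *
 * Must be called with schan->lock held, schan->active empty and
 * schan->queued containing at least one entry.
 */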

> +static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
> +{
> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
> +     int cid = schan->chan.chan_id;
> +
> +     /* Move the first queued descriptor to active list */
> +     list_move_tail(&schan->queued, &schan->active);
> +
> +     writel_relaxed(schan->width, sdma->regs + SIRFSOC_DMA_WIDTH_0 + cid * 4);
> +     writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
> +             (schan->direction << SIRFSOC_DMA_DIR_CTRL_BIT),
> +             sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
> +     writel_relaxed(schan->xlen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
> +     writel_relaxed(schan->ylen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) | (1 << cid),
> +             sdma->regs + SIRFSOC_DMA_INT_EN);
> +     writel_relaxed(schan->addr >> 2, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
> +}
> +
> +/* Interrupt handler */
> +static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
> +{
> +     struct sirfsoc_dma *sdma = data;
> +     struct sirfsoc_dma_chan *schan;
> +     u32 is;
> +     int ch;
> +
> +     is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
> +     while ((ch = fls(is) - 1) >= 0) {
> +             is &= ~(1 << ch);
> +             writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
> +             schan = &sdma->channels[ch];
> +
> +             spin_lock(&schan->lock);
> +
> +             /* Execute queued descriptors */
> +             list_splice_tail_init(&schan->active, &schan->completed);
> +             if (!list_empty(&schan->queued))
> +                     sirfsoc_dma_execute(schan);
> +
> +             spin_unlock(&schan->lock);
> +     }
Here you know which channel has triggered the interrupt; you could pass this
info to your tasklet and avoid scanning all the channels again there.
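One way to do that (a rough, untested sketch; pending_chs and the per-channel
helper are hypothetical additions, not existing code):

	/* hardirq path: remember which channels were serviced;
	 * pending_chs would be a new unsigned long in struct sirfsoc_dma */
	set_bit(ch, &sdma->pending_chs);
	tasklet_schedule(&sdma->tasklet);

	/* tasklet path: only visit the channels the irq handler flagged */
	for_each_set_bit(ch, &sdma->pending_chs, SIRFSOC_DMA_CHANNELS) {
		clear_bit(ch, &sdma->pending_chs);
		sirfsoc_dma_process_completed_chan(&sdma->channels[ch]);
	}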

> +
> +     /* Schedule tasklet */
> +     tasklet_schedule(&sdma->tasklet);
> +
> +     return IRQ_HANDLED;
> +}
> +
> +/* process completed descriptors */
> +static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
> +{
> +     dma_cookie_t last_cookie = 0;
> +     struct sirfsoc_dma_chan *schan;
> +     struct sirfsoc_dma_desc *mdesc;
> +     struct dma_async_tx_descriptor *desc;
> +     unsigned long flags;
> +     LIST_HEAD(list);
> +     int i;
> +
> +     for (i = 0; i < sdma->dma.chancnt; i++) {
> +             schan = &sdma->channels[i];
> +
> +             /* Get all completed descriptors */
> +             spin_lock_irqsave(&schan->lock, flags);
This will block interrupts; I don't see a reason why that is needed here.

> +             if (!list_empty(&schan->completed))
> +                     list_splice_tail_init(&schan->completed, &list);
> +             spin_unlock_irqrestore(&schan->lock, flags);
> +
> +             if (list_empty(&list))
> +                     continue;
> +
> +             /* Execute callbacks and run dependencies */
> +             list_for_each_entry(mdesc, &list, node) {
> +                     desc = &mdesc->desc;
> +
> +                     if (desc->callback)
> +                             desc->callback(desc->callback_param);
> +
> +                     last_cookie = desc->cookie;
> +                     dma_run_dependencies(desc);
> +             }
> +
> +             /* Free descriptors */
> +             spin_lock_irqsave(&schan->lock, flags);
> +             list_splice_tail_init(&list, &schan->free);
> +             schan->completed_cookie = last_cookie;
> +             spin_unlock_irqrestore(&schan->lock, flags);
> +     }
> +}
> +
> +/* DMA Tasklet */
> +static void sirfsoc_dma_tasklet(unsigned long data)
> +{
> +     struct sirfsoc_dma *sdma = (void *)data;
> +
> +     sirfsoc_dma_process_completed(sdma);
> +}
> +
> +/* Submit descriptor to hardware */
> +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
> +{
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
> +     struct sirfsoc_dma_desc *mdesc;
> +     unsigned long flags;
> +     dma_cookie_t cookie;
> +
> +     mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +
> +     /* Move descriptor to queue */
> +     list_move_tail(&mdesc->node, &schan->queued);
> +
> +     /* If channel is idle, execute all queued descriptors */
> +     if (list_empty(&schan->active))
> +             sirfsoc_dma_execute(schan);
This is wrong; starting the hardware should be done in .issue_pending.
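I.e. .tx_submit would only assign the cookie and queue the descriptor, and the
hardware would be kicked later from .issue_pending, roughly (sketch):

	spin_lock_irqsave(&schan->lock, flags);
	/* queue only; sirfsoc_dma_issue_pending() starts the hardware later */
	list_move_tail(&mdesc->node, &schan->queued);
	cookie = schan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;
	schan->chan.cookie = mdesc->desc.cookie = cookie;
	spin_unlock_irqrestore(&schan->lock, flags);
	return cookie;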

> +
> +     /* Update cookie */
> +     cookie = schan->chan.cookie + 1;
> +     if (cookie <= 0)
> +             cookie = 1;
> +
> +     schan->chan.cookie = cookie;
> +     mdesc->desc.cookie = cookie;
> +
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     return cookie;
> +}
> +
> +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
> +     struct sirfsoc_dma_slave_config *config)
> +{
> +     u32 addr, direction;
> +     unsigned long flags;
> +
> +     switch (config->generic_config.direction) {
> +     case DMA_FROM_DEVICE:
> +             direction = 0;
> +             addr = config->generic_config.dst_addr;
> +             break;
> +
> +     case DMA_TO_DEVICE:
> +             direction = 1;
> +             addr = config->generic_config.src_addr;
> +             break;
> +
> +     default:
> +             return -EINVAL;
> +     }
> +
> +     if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
> +             (config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
> +             return -EINVAL;
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +     schan->addr = addr;
> +     schan->direction = direction;
> +     schan->xlen = config->xlen;
> +     schan->ylen = config->ylen;
> +     schan->width = config->width;
What do these parameters mean? Is width the DMA FIFO width? If so, use the
existing dma_slave_config members for that.

> +     schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     return 0;
> +}
> +
> +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
> +{
> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
> +     int cid = schan->chan.chan_id;
> +     unsigned long flags;
> +
> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
> +             sdma->regs + SIRFSOC_DMA_INT_EN);
> +     writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +
> +     list_splice_tail_init(&schan->queued, &schan->free);
What about the active list?
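E.g. something along these lines (sketch only):

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);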
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     return 0;
> +}
> +
> +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +     unsigned long arg)
> +{
> +     struct sirfsoc_dma_slave_config *config;
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +
> +     switch (cmd) {
> +     case DMA_TERMINATE_ALL:
> +             return sirfsoc_dma_terminate_all(schan);
> +     case DMA_SLAVE_CONFIG:
> +             config = (struct sirfsoc_dma_slave_config *)arg;
> +             return sirfsoc_dma_slave_config(schan, config);
> +
> +     default:
> +             break;
> +     }
> +
> +     return -ENOSYS;
> +}
> +
> +/* Alloc channel resources */
> +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +     struct sirfsoc_dma_desc *mdesc;
> +     unsigned long flags;
> +     LIST_HEAD(descs);
> +     int i;
> +
> +     /* Alloc descriptors for this channel */
> +     for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
> +             mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
> +             if (!mdesc) {
> +                     dev_notice(sdma->dma.dev, "Memory allocation error. "
> +                             "Allocated only %u descriptors\n", i);
> +                     break;
> +             }
> +
> +             dma_async_tx_descriptor_init(&mdesc->desc, chan);
> +             mdesc->desc.flags = DMA_CTRL_ACK;
> +             mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
> +
> +             list_add_tail(&mdesc->node, &descs);
> +     }
> +
> +     /* Return error only if no descriptors were allocated */
> +     if (i == 0)
> +             return -ENOMEM;
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +
> +     list_splice_tail_init(&descs, &schan->free);
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     return 0;
> +}
> +
> +/* Free channel resources */
> +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
> +{
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +     struct sirfsoc_dma_desc *mdesc, *tmp;
> +     unsigned long flags;
> +     LIST_HEAD(descs);
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +
> +     /* Channel must be idle */
> +     BUG_ON(!list_empty(&schan->prepared));
> +     BUG_ON(!list_empty(&schan->queued));
> +     BUG_ON(!list_empty(&schan->active));
> +     BUG_ON(!list_empty(&schan->completed));
> +
> +     /* Move data */
> +     list_splice_tail_init(&schan->free, &descs);
> +
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     /* Free descriptors */
> +     list_for_each_entry_safe(mdesc, tmp, &descs, node)
> +             kfree(mdesc);
> +}
> +
> +/* Send pending descriptor to hardware */
> +static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
> +{
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +     unsigned long flags;
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +
> +     if (list_empty(&schan->active) && !list_empty(&schan->queued))
> +             sirfsoc_dma_execute(schan);
> +
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +}
> +
> +/* Check request completion status */
> +static enum dma_status
> +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
> +     struct dma_tx_state *txstate)
> +{
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +     unsigned long flags;
> +     dma_cookie_t last_used;
> +     dma_cookie_t last_complete;
> +
> +     spin_lock_irqsave(&schan->lock, flags);
> +     last_used = schan->chan.cookie;
> +     last_complete = schan->completed_cookie;
> +     spin_unlock_irqrestore(&schan->lock, flags);
> +
> +     dma_set_tx_state(txstate, last_complete, last_used, 0);
> +     return dma_async_is_complete(cookie, last_complete, last_used);
> +}
> +
> +/* Prepare descriptor for memory to memory copy */
> +static struct dma_async_tx_descriptor *
> +sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
> +     size_t len, unsigned long flags)
> +{
> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> +     struct sirfsoc_dma_desc *mdesc = NULL;
> +     unsigned long iflags;
> +
> +     /* Get free descriptor */
> +     spin_lock_irqsave(&schan->lock, iflags);
> +     if (!list_empty(&schan->free)) {
> +             mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
> +                     node);
> +             list_del(&mdesc->node);
> +     }
> +     spin_unlock_irqrestore(&schan->lock, iflags);
> +
> +     if (!mdesc) {
> +             /* try to free completed descriptors */
> +             sirfsoc_dma_process_completed(sdma);
> +             return NULL;
> +     }
> +
> +     /* Place descriptor in prepared list */
> +     spin_lock_irqsave(&schan->lock, iflags);
> +     list_add_tail(&mdesc->node, &schan->prepared);
> +     spin_unlock_irqrestore(&schan->lock, iflags);
> +
> +     return &mdesc->desc;
> +}
> +
> +/*
> + * The DMA controller consists of 16 independent DMA channels.
> + * Each channel is allocated to a different function
> + */
> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
> +{
> +     unsigned int ch_nr = (unsigned int) chan_id;
> +
> +     if (ch_nr == chan->chan_id)
> +             return true;
> +
> +     return false;
> +}
> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
> +
> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
> +{
> +     struct device_node *dn = op->dev.of_node;
> +     struct device *dev = &op->dev;
> +     struct dma_device *dma;
> +     struct sirfsoc_dma *sdma;
> +     struct sirfsoc_dma_chan *schan;
> +     struct resource res;
> +     ulong regs_start, regs_size;
> +     u32 id;
> +     int retval, i;
> +
> +     sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
> +     if (!sdma) {
> +             dev_err(dev, "Memory exhausted!\n");
> +             return -ENOMEM;
> +     }
> +
> +     if (of_property_read_u32(dn, "cell-index", &id)) {
> +             dev_err(dev, "Fail to get DMAC index\n");
> +             return -ENODEV;
> +     }
> +
> +     sdma->irq = irq_of_parse_and_map(dn, 0);
> +     if (sdma->irq == NO_IRQ) {
> +             dev_err(dev, "Error mapping IRQ!\n");
> +             return -EINVAL;
> +     }
> +
> +     retval = of_address_to_resource(dn, 0, &res);
> +     if (retval) {
> +             dev_err(dev, "Error parsing memory region!\n");
> +             return retval;
> +     }
> +
> +     regs_start = res.start;
> +     regs_size = resource_size(&res);
> +
> +     if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
> +             dev_err(dev, "Error requesting memory region!\n");
> +             return -EBUSY;
> +     }
> +
> +     sdma->regs = devm_ioremap(dev, regs_start, regs_size);
> +     if (!sdma->regs) {
> +             dev_err(dev, "Error mapping memory region!\n");
> +             return -ENOMEM;
> +     }
> +
> +     retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
> +             sdma);
> +     if (retval) {
> +             dev_err(dev, "Error requesting IRQ!\n");
> +             return -EINVAL;
> +     }
> +
> +     dma = &sdma->dma;
> +     dma->dev = dev;
> +     dma->chancnt = SIRFSOC_DMA_CHANNELS;
> +
> +     dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
> +     dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
> +     dma->device_issue_pending = sirfsoc_dma_issue_pending;
> +     dma->device_control = sirfsoc_dma_control;
> +     dma->device_tx_status = sirfsoc_dma_tx_status;
> +     dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
> +
> +     INIT_LIST_HEAD(&dma->channels);
> +     dma_cap_set(DMA_MEMCPY, dma->cap_mask);
DMA_SLAVE as well..
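I.e. (sketch):

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);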

> +
> +     for (i = 0; i < dma->chancnt; i++) {
> +             schan = &sdma->channels[i];
> +
> +             schan->chan.device = dma;
> +             schan->chan.chan_id = dma->chancnt * id + i;
> +             schan->chan.cookie = 1;
> +             schan->completed_cookie = schan->chan.cookie;
> +
> +             INIT_LIST_HEAD(&schan->free);
> +             INIT_LIST_HEAD(&schan->prepared);
> +             INIT_LIST_HEAD(&schan->queued);
> +             INIT_LIST_HEAD(&schan->active);
> +             INIT_LIST_HEAD(&schan->completed);
> +
> +             spin_lock_init(&schan->lock);
> +             list_add_tail(&schan->chan.device_node, &dma->channels);
> +     }
> +
> +     tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
> +
> +     /* Register DMA engine */
> +     dev_set_drvdata(dev, sdma);
> +     retval = dma_async_device_register(dma);
> +     if (retval) {
> +             devm_free_irq(dev, sdma->irq, sdma);
> +             irq_dispose_mapping(sdma->irq);
> +     }
> +
> +     return retval;
> +}
> +
> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
> +{
> +     struct device *dev = &op->dev;
> +     struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
> +
> +     dma_async_device_unregister(&sdma->dma);
> +     devm_free_irq(dev, sdma->irq, sdma);
> +     irq_dispose_mapping(sdma->irq);
> +
> +     return 0;
> +}
> +
> +static struct of_device_id sirfsoc_dma_match[] = {
> +     { .compatible = "sirf,prima2-dmac", },
> +     {},
> +};
> +
> +static struct platform_driver sirfsoc_dma_driver = {
> +     .probe          = sirfsoc_dma_probe,
> +     .remove         = __devexit_p(sirfsoc_dma_remove),
> +     .driver = {
> +             .name = DRV_NAME,
> +             .owner = THIS_MODULE,
> +             .of_match_table = sirfsoc_dma_match,
> +     },
> +};
> +
> +static int __init sirfsoc_dma_init(void)
> +{
> +     return platform_driver_register(&sirfsoc_dma_driver);
> +}
> +module_init(sirfsoc_dma_init);
> +
> +static void __exit sirfsoc_dma_exit(void)
> +{
> +     platform_driver_unregister(&sirfsoc_dma_driver);
> +}
> +module_exit(sirfsoc_dma_exit);
> +
> +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
> +     "Barry Song <baohua.song@csr.com>");
> +MODULE_DESCRIPTION("SIRFSOC DMA control driver");
> +MODULE_LICENSE("GPL");
> diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
> new file mode 100644
> index 0000000..75d2d86
> --- /dev/null
> +++ b/include/linux/sirfsoc_dma.h
> @@ -0,0 +1,18 @@
> +#ifndef _SIRFSOC_DMA_H_
> +#define _SIRFSOC_DMA_H_
> +/*
> + * create a custom slave config struct for CSR SiRFprimaII and pass that,
> + * and make dma_slave_config a member of that struct
> + */
> +struct sirfsoc_dma_slave_config {
> +     struct dma_slave_config generic_config;
> +
> +     /* CSR SiRFprimaII 2D-DMA config */
> +     int             xlen;           /* DMA xlen */
> +     int             ylen;           /* DMA ylen */
what lengths?

> +     int             width;          /* DMA width */
> +};
> +
> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
> +
> +#endif



^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 16:14   ` Koul, Vinod
@ 2011-09-07 16:46     ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-07 16:46 UTC (permalink / raw)
  To: Koul, Vinod
  Cc: Baohua.Song, arnd, linux-kernel, workgroup.linux, rongjun.ying,
	Williams, Dan J, linux-arm-kernel

Hi Vinod,
Thanks for your quick feedback.

2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>> From: Rongjun Ying <rongjun.ying@csr.com>
>
>> +config SIRF_DMA
>> +     tristate "CSR SiRFprimaII DMA support"
>> +     depends on ARCH_PRIMA2
>> +     select DMA_ENGINE
>> +     help
>> +       Enable support for the CSR SiRFprimaII DMA engine.
> How different is it from the other primacell based DMA drivers, and why
> wouldn't it make sense to use/modify one of them?

It is quite different from the PrimeCell-based DMA controllers like the
PL080 and PL330. The prima2 has a self-defined DMAC IP: basically it is a
2D-mode DMA with two scales, X and Y, and a direct way to start and stop
the DMA. Every channel has a fixed function and serves only one peripheral,
which is why we have a filter id.
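
(For reference, a client would then grab its fixed channel through the
standard filter mechanism, roughly like this; the channel number 4 below is
only an example:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)4);
	if (!chan)
		return -ENODEV;	/* the fixed channel is not available */
)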

>
>> +/*
>> + * Execute all queued DMA descriptors.
>> + *
>> + * Following requirements must be met while calling sirfsoc_dma_execute():
>> + * a) schan->lock is acquired,
>> + * b) schan->active list is empty,
>> + * c) schan->queued list contains at least one entry.
>> + */
> Please use kernel-doc format...

ok.
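
(The same comment in kernel-doc form would look roughly like this, as a
sketch only:

/**
 * sirfsoc_dma_execute() - Execute all queued DMA descriptors
 * @schan: SiRFSoC DMA channel whose queue should be started
 *
 * The caller must hold schan->lock, schan->active must be empty and
 * schan->queued must contain at least one entry.
 */
)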
>
>> +static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
>> +{
>> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
>> +     int cid = schan->chan.chan_id;
>> +
>> +     /* Move the first queued descriptor to active list */
>> +     list_move_tail(&schan->queued, &schan->active);
>> +
>> +     writel_relaxed(schan->width, sdma->regs + SIRFSOC_DMA_WIDTH_0 + cid * 4);
>> +     writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
>> +             (schan->direction << SIRFSOC_DMA_DIR_CTRL_BIT),
>> +             sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
>> +     writel_relaxed(schan->xlen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
>> +     writel_relaxed(schan->ylen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
>> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) | (1 << cid),
>> +             sdma->regs + SIRFSOC_DMA_INT_EN);
>> +     writel_relaxed(schan->addr >> 2, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
>> +}
>> +
>> +/* Interrupt handler */
>> +static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
>> +{
>> +     struct sirfsoc_dma *sdma = data;
>> +     struct sirfsoc_dma_chan *schan;
>> +     u32 is;
>> +     int ch;
>> +
>> +     is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
>> +     while ((ch = fls(is) - 1) >= 0) {
>> +             is &= ~(1 << ch);
>> +             writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
>> +             schan = &sdma->channels[ch];
>> +
>> +             spin_lock(&schan->lock);
>> +
>> +             /* Execute queued descriptors */
>> +             list_splice_tail_init(&schan->active, &schan->completed);
>> +             if (!list_empty(&schan->queued))
>> +                     sirfsoc_dma_execute(schan);
>> +
>> +             spin_unlock(&schan->lock);
>> +     }
> Here you know which channel has triggered interrupt and you may pass
> this info to your tasklet and avoid scanning again there

ok. let me see.
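
(One possible way, just a sketch where pending_chans would be a new,
hypothetical DECLARE_BITMAP(pending_chans, SIRFSOC_DMA_CHANNELS) member of
struct sirfsoc_dma, is to flag the channels in the handler and only walk
those in the tasklet:

	/* IRQ handler: remember which channel fired */
	set_bit(ch, sdma->pending_chans);	/* pending_chans: hypothetical bitmap */

	/* tasklet: only visit the flagged channels */
	for_each_set_bit(ch, sdma->pending_chans, SIRFSOC_DMA_CHANNELS) {
		clear_bit(ch, sdma->pending_chans);
		/* process &sdma->channels[ch] here */
	}
)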

>
>> +
>> +     /* Schedule tasklet */
>> +     tasklet_schedule(&sdma->tasklet);
>> +
>> +     return IRQ_HANDLED;
>> +}
>> +
>> +/* process completed descriptors */
>> +static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
>> +{
>> +     dma_cookie_t last_cookie = 0;
>> +     struct sirfsoc_dma_chan *schan;
>> +     struct sirfsoc_dma_desc *mdesc;
>> +     struct dma_async_tx_descriptor *desc;
>> +     unsigned long flags;
>> +     LIST_HEAD(list);
>> +     int i;
>> +
>> +     for (i = 0; i < sdma->dma.chancnt; i++) {
>> +             schan = &sdma->channels[i];
>> +
>> +             /* Get all completed descriptors */
>> +             spin_lock_irqsave(&schan->lock, flags);
> this will block interrupts, i dont see a reason why this should be used
> here??

OK, no IRQ is accessing the completed list.

>
>> +             if (!list_empty(&schan->completed))
>> +                     list_splice_tail_init(&schan->completed, &list);
>> +             spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +             if (list_empty(&list))
>> +                     continue;
>> +
>> +             /* Execute callbacks and run dependencies */
>> +             list_for_each_entry(mdesc, &list, node) {
>> +                     desc = &mdesc->desc;
>> +
>> +                     if (desc->callback)
>> +                             desc->callback(desc->callback_param);
>> +
>> +                     last_cookie = desc->cookie;
>> +                     dma_run_dependencies(desc);
>> +             }
>> +
>> +             /* Free descriptors */
>> +             spin_lock_irqsave(&schan->lock, flags);
>> +             list_splice_tail_init(&list, &schan->free);
>> +             schan->completed_cookie = last_cookie;
>> +             spin_unlock_irqrestore(&schan->lock, flags);
>> +     }
>> +}
>> +
>> +/* DMA Tasklet */
>> +static void sirfsoc_dma_tasklet(unsigned long data)
>> +{
>> +     struct sirfsoc_dma *sdma = (void *)data;
>> +
>> +     sirfsoc_dma_process_completed(sdma);
>> +}
>> +
>> +/* Submit descriptor to hardware */
>> +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
>> +     struct sirfsoc_dma_desc *mdesc;
>> +     unsigned long flags;
>> +     dma_cookie_t cookie;
>> +
>> +     mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     /* Move descriptor to queue */
>> +     list_move_tail(&mdesc->node, &schan->queued);
>> +
>> +     /* If channel is idle, execute all queued descriptors */
>> +     if (list_empty(&schan->active))
>> +             sirfsoc_dma_execute(schan);
> this is wrong, this should be done in .issue_pending

OK. As a reference I looked at several current drivers in drivers/dma, and
they do start the DMA in submit... I guess they are wrong too?
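
(The expected split is roughly: tx_submit only moves the descriptor to the
queued list and assigns a cookie, and the hardware is kicked from
.issue_pending. The existing sirfsoc_dma_issue_pending() already does:

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

so dropping the sirfsoc_dma_execute() call from tx_submit should be enough.)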

>
>> +
>> +     /* Update cookie */
>> +     cookie = schan->chan.cookie + 1;
>> +     if (cookie <= 0)
>> +             cookie = 1;
>> +
>> +     schan->chan.cookie = cookie;
>> +     mdesc->desc.cookie = cookie;
>> +
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     return cookie;
>> +}
>> +
>> +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
>> +     struct sirfsoc_dma_slave_config *config)
>> +{
>> +     u32 addr, direction;
>> +     unsigned long flags;
>> +
>> +     switch (config->generic_config.direction) {
>> +     case DMA_FROM_DEVICE:
>> +             direction = 0;
>> +             addr = config->generic_config.dst_addr;
>> +             break;
>> +
>> +     case DMA_TO_DEVICE:
>> +             direction = 1;
>> +             addr = config->generic_config.src_addr;
>> +             break;
>> +
>> +     default:
>> +             return -EINVAL;
>> +     }
>> +
>> +     if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
>> +             (config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
>> +             return -EINVAL;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +     schan->addr = addr;
>> +     schan->direction = direction;
>> +     schan->xlen = config->xlen;
>> +     schan->ylen = config->ylen;
>> +     schan->width = config->width;
> what do these parameters mean, is width the dma fifo width, if so use
> existing members for that

The width is not the DMA FIFO width. The prima2 requires three parameters
to start a 2D DMA transfer; their relationship is shown below:

<------------------width------------------>
|------|--------------------------|-------|    ---
|      |                          |       |     ^
|      |<----------xlen---------->|       |     |
|      |                          |       |   ylen
|      |                          |       |     |
|------|--------------------------|-------|    _v_

After I get back to the office, I'll copy the details from the datasheet
for you. (A rough sketch of how a client might fill in these fields follows
below.)
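
A minimal sketch of a client filling in these fields; all values and the
buf_phys address are placeholders, units are whatever the hardware defines,
and chan is assumed to have been requested earlier:

	struct sirfsoc_dma_slave_config cfg = {
		.generic_config = {
			.direction	= DMA_FROM_DEVICE,
			.dst_addr	= buf_phys,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 4,
		},
		.xlen	= 64,	/* length of one line actually copied */
		.ylen	= 16,	/* number of lines */
		.width	= 128,	/* full pitch of one line in the buffer */
	};

	/* generic_config is the first member, so the driver can recover cfg */
	dmaengine_slave_config(chan, &cfg.generic_config);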

>
>> +     schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     return 0;
>> +}
>> +
>> +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
>> +{
>> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
>> +     int cid = schan->chan.chan_id;
>> +     unsigned long flags;
>> +
>> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
>> +             sdma->regs + SIRFSOC_DMA_INT_EN);
>> +     writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     list_splice_tail_init(&schan->queued, &schan->free);
> what about active list
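(presumably the active list wants the same treatment once the hardware has
been stopped, i.e. also doing

	list_splice_tail_init(&schan->active, &schan->free);

under the same lock.)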
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     return 0;
>> +}
>> +
>> +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
>> +     unsigned long arg)
>> +{
>> +     struct sirfsoc_dma_slave_config *config;
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +
>> +     switch (cmd) {
>> +     case DMA_TERMINATE_ALL:
>> +             return sirfsoc_dma_terminate_all(schan);
>> +     case DMA_SLAVE_CONFIG:
>> +             config = (struct sirfsoc_dma_slave_config *)arg;
>> +             return sirfsoc_dma_slave_config(schan, config);
>> +
>> +     default:
>> +             break;
>> +     }
>> +
>> +     return -ENOSYS;
>> +}
>> +
>> +/* Alloc channel resources */
>> +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
>> +{
>> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     struct sirfsoc_dma_desc *mdesc;
>> +     unsigned long flags;
>> +     LIST_HEAD(descs);
>> +     int i;
>> +
>> +     /* Alloc descriptors for this channel */
>> +     for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
>> +             mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
>> +             if (!mdesc) {
>> +                     dev_notice(sdma->dma.dev, "Memory allocation error. "
>> +                             "Allocated only %u descriptors\n", i);
>> +                     break;
>> +             }
>> +
>> +             dma_async_tx_descriptor_init(&mdesc->desc, chan);
>> +             mdesc->desc.flags = DMA_CTRL_ACK;
>> +             mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
>> +
>> +             list_add_tail(&mdesc->node, &descs);
>> +     }
>> +
>> +     /* Return error only if no descriptors were allocated */
>> +     if (i == 0)
>> +             return -ENOMEM;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     list_splice_tail_init(&descs, &schan->free);
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     return 0;
>> +}
>> +
>> +/* Free channel resources */
>> +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     struct sirfsoc_dma_desc *mdesc, *tmp;
>> +     unsigned long flags;
>> +     LIST_HEAD(descs);
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     /* Channel must be idle */
>> +     BUG_ON(!list_empty(&schan->prepared));
>> +     BUG_ON(!list_empty(&schan->queued));
>> +     BUG_ON(!list_empty(&schan->active));
>> +     BUG_ON(!list_empty(&schan->completed));
>> +
>> +     /* Move data */
>> +     list_splice_tail_init(&schan->free, &descs);
>> +
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     /* Free descriptors */
>> +     list_for_each_entry_safe(mdesc, tmp, &descs, node)
>> +             kfree(mdesc);
>> +}
>> +
>> +/* Send pending descriptor to hardware */
>> +static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     unsigned long flags;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     if (list_empty(&schan->active) && !list_empty(&schan->queued))
>> +             sirfsoc_dma_execute(schan);
>> +
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +}
>> +
>> +/* Check request completion status */
>> +static enum dma_status
>> +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
>> +     struct dma_tx_state *txstate)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     unsigned long flags;
>> +     dma_cookie_t last_used;
>> +     dma_cookie_t last_complete;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +     last_used = schan->chan.cookie;
>> +     last_complete = schan->completed_cookie;
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     dma_set_tx_state(txstate, last_complete, last_used, 0);
>> +     return dma_async_is_complete(cookie, last_complete, last_used);
>> +}
>> +
>> +/* Prepare descriptor for memory to memory copy */
>> +static struct dma_async_tx_descriptor *
>> +sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
>> +     size_t len, unsigned long flags)
>> +{
>> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     struct sirfsoc_dma_desc *mdesc = NULL;
>> +     unsigned long iflags;
>> +
>> +     /* Get free descriptor */
>> +     spin_lock_irqsave(&schan->lock, iflags);
>> +     if (!list_empty(&schan->free)) {
>> +             mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
>> +                     node);
>> +             list_del(&mdesc->node);
>> +     }
>> +     spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +     if (!mdesc) {
>> +             /* try to free completed descriptors */
>> +             sirfsoc_dma_process_completed(sdma);
>> +             return NULL;
>> +     }
>> +
>> +     /* Place descriptor in prepared list */
>> +     spin_lock_irqsave(&schan->lock, iflags);
>> +     list_add_tail(&mdesc->node, &schan->prepared);
>> +     spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +     return &mdesc->desc;
>> +}
>> +
>> +/*
>> + * The DMA controller consists of 16 independent DMA channels.
>> + * Each channel is allocated to a different function
>> + */
>> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
>> +{
>> +     unsigned int ch_nr = (unsigned int) chan_id;
>> +
>> +     if (ch_nr == chan->chan_id)
>> +             return true;
>> +
>> +     return false;
>> +}
>> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
>> +
>> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
>> +{
>> +     struct device_node *dn = op->dev.of_node;
>> +     struct device *dev = &op->dev;
>> +     struct dma_device *dma;
>> +     struct sirfsoc_dma *sdma;
>> +     struct sirfsoc_dma_chan *schan;
>> +     struct resource res;
>> +     ulong regs_start, regs_size;
>> +     u32 id;
>> +     int retval, i;
>> +
>> +     sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
>> +     if (!sdma) {
>> +             dev_err(dev, "Memory exhausted!\n");
>> +             return -ENOMEM;
>> +     }
>> +
>> +     if (of_property_read_u32(dn, "cell-index", &id)) {
>> +             dev_err(dev, "Fail to get DMAC index\n");
>> +             return -ENODEV;
>> +     }
>> +
>> +     sdma->irq = irq_of_parse_and_map(dn, 0);
>> +     if (sdma->irq == NO_IRQ) {
>> +             dev_err(dev, "Error mapping IRQ!\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     retval = of_address_to_resource(dn, 0, &res);
>> +     if (retval) {
>> +             dev_err(dev, "Error parsing memory region!\n");
>> +             return retval;
>> +     }
>> +
>> +     regs_start = res.start;
>> +     regs_size = resource_size(&res);
>> +
>> +     if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
>> +             dev_err(dev, "Error requesting memory region!\n");
>> +             return -EBUSY;
>> +     }
>> +
>> +     sdma->regs = devm_ioremap(dev, regs_start, regs_size);
>> +     if (!sdma->regs) {
>> +             dev_err(dev, "Error mapping memory region!\n");
>> +             return -ENOMEM;
>> +     }
>> +
>> +     retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
>> +             sdma);
>> +     if (retval) {
>> +             dev_err(dev, "Error requesting IRQ!\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     dma = &sdma->dma;
>> +     dma->dev = dev;
>> +     dma->chancnt = SIRFSOC_DMA_CHANNELS;
>> +
>> +     dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
>> +     dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
>> +     dma->device_issue_pending = sirfsoc_dma_issue_pending;
>> +     dma->device_control = sirfsoc_dma_control;
>> +     dma->device_tx_status = sirfsoc_dma_tx_status;
>> +     dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
>> +
>> +     INIT_LIST_HEAD(&dma->channels);
>> +     dma_cap_set(DMA_MEMCPY, dma->cap_mask);
> DMA_SLAVE as well..

ok.

>
>> +
>> +     for (i = 0; i < dma->chancnt; i++) {
>> +             schan = &sdma->channels[i];
>> +
>> +             schan->chan.device = dma;
>> +             schan->chan.chan_id = dma->chancnt * id + i;
>> +             schan->chan.cookie = 1;
>> +             schan->completed_cookie = schan->chan.cookie;
>> +
>> +             INIT_LIST_HEAD(&schan->free);
>> +             INIT_LIST_HEAD(&schan->prepared);
>> +             INIT_LIST_HEAD(&schan->queued);
>> +             INIT_LIST_HEAD(&schan->active);
>> +             INIT_LIST_HEAD(&schan->completed);
>> +
>> +             spin_lock_init(&schan->lock);
>> +             list_add_tail(&schan->chan.device_node, &dma->channels);
>> +     }
>> +
>> +     tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
>> +
>> +     /* Register DMA engine */
>> +     dev_set_drvdata(dev, sdma);
>> +     retval = dma_async_device_register(dma);
>> +     if (retval) {
>> +             devm_free_irq(dev, sdma->irq, sdma);
>> +             irq_dispose_mapping(sdma->irq);
>> +     }
>> +
>> +     return retval;
>> +}
>> +
>> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
>> +{
>> +     struct device *dev = &op->dev;
>> +     struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
>> +
>> +     dma_async_device_unregister(&sdma->dma);
>> +     devm_free_irq(dev, sdma->irq, sdma);
>> +     irq_dispose_mapping(sdma->irq);
>> +
>> +     return 0;
>> +}
>> +
>> +static struct of_device_id sirfsoc_dma_match[] = {
>> +     { .compatible = "sirf,prima2-dmac", },
>> +     {},
>> +};
>> +
>> +static struct platform_driver sirfsoc_dma_driver = {
>> +     .probe          = sirfsoc_dma_probe,
>> +     .remove         = __devexit_p(sirfsoc_dma_remove),
>> +     .driver = {
>> +             .name = DRV_NAME,
>> +             .owner = THIS_MODULE,
>> +             .of_match_table = sirfsoc_dma_match,
>> +     },
>> +};
>> +
>> +static int __init sirfsoc_dma_init(void)
>> +{
>> +     return platform_driver_register(&sirfsoc_dma_driver);
>> +}
>> +module_init(sirfsoc_dma_init);
>> +
>> +static void __exit sirfsoc_dma_exit(void)
>> +{
>> +     platform_driver_unregister(&sirfsoc_dma_driver);
>> +}
>> +module_exit(sirfsoc_dma_exit);
>> +
>> +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
>> +     "Barry Song <baohua.song@csr.com>");
>> +MODULE_DESCRIPTION("SIRFSOC DMA control driver");
>> +MODULE_LICENSE("GPL");
>> diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
>> new file mode 100644
>> index 0000000..75d2d86
>> --- /dev/null
>> +++ b/include/linux/sirfsoc_dma.h
>> @@ -0,0 +1,18 @@
>> +#ifndef _SIRFSOC_DMA_H_
>> +#define _SIRFSOC_DMA_H_
>> +/*
>> + * create a custom slave config struct for CSR SiRFprimaII and pass that,
>> + * and make dma_slave_config a member of that struct
>> + */
>> +struct sirfsoc_dma_slave_config {
>> +     struct dma_slave_config generic_config;
>> +
>> +     /* CSR SiRFprimaII 2D-DMA config */
>> +     int             xlen;           /* DMA xlen */
>> +     int             ylen;           /* DMA ylen */
> what lengths?
>
>> +     int             width;          /* DMA width */
>> +};
>> +
>> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
>> +
>> +#endif

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-07 16:46     ` Barry Song
  0 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-07 16:46 UTC (permalink / raw)
  To: linux-arm-kernel

Hi Vinod,
Thanks for your quick feedback.

2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>> From: Rongjun Ying <rongjun.ying@csr.com>
>
>> +config SIRF_DMA
>> + ? ? tristate "CSR SiRFprimaII DMA support"
>> + ? ? depends on ARCH_PRIMA2
>> + ? ? select DMA_ENGINE
>> + ? ? help
>> + ? ? ? Enable support for the CSR SiRFprimaII DMA engine.
> How different is it from the other primacell based DMA drivers, and why
> wouldn't it make sense to use/modify one of them?

It is quite different from the PrimeCell-based DMA controllers like the
PL080 and PL330. The prima2 has a self-defined DMAC IP: basically it is a
2D-mode DMA with two scales, X and Y, and a direct way to start and stop
the DMA. Every channel has a fixed function and serves only one peripheral,
which is why we have a filter id.

>
>> +/*
>> + * Execute all queued DMA descriptors.
>> + *
>> + * Following requirements must be met while calling sirfsoc_dma_execute():
>> + * a) schan->lock is acquired,
>> + * b) schan->active list is empty,
>> + * c) schan->queued list contains at least one entry.
>> + */
> Please use kernel-doc format...

ok.
>
>> +static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
>> +{
>> + ? ? struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
>> + ? ? int cid = schan->chan.chan_id;
>> +
>> + ? ? /* Move the first queued descriptor to active list */
>> + ? ? list_move_tail(&schan->queued, &schan->active);
>> +
>> + ? ? writel_relaxed(schan->width, sdma->regs + SIRFSOC_DMA_WIDTH_0 + cid * 4);
>> + ? ? writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
>> + ? ? ? ? ? ? (schan->direction << SIRFSOC_DMA_DIR_CTRL_BIT),
>> + ? ? ? ? ? ? sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
>> + ? ? writel_relaxed(schan->xlen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
>> + ? ? writel_relaxed(schan->ylen, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
>> + ? ? writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) | (1 << cid),
>> + ? ? ? ? ? ? sdma->regs + SIRFSOC_DMA_INT_EN);
>> + ? ? writel_relaxed(schan->addr >> 2, sdma->regs + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
>> +}
>> +
>> +/* Interrupt handler */
>> +static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
>> +{
>> + ? ? struct sirfsoc_dma *sdma = data;
>> + ? ? struct sirfsoc_dma_chan *schan;
>> + ? ? u32 is;
>> + ? ? int ch;
>> +
>> + ? ? is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
>> + ? ? while ((ch = fls(is) - 1) >= 0) {
>> + ? ? ? ? ? ? is &= ~(1 << ch);
>> + ? ? ? ? ? ? writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
>> + ? ? ? ? ? ? schan = &sdma->channels[ch];
>> +
>> + ? ? ? ? ? ? spin_lock(&schan->lock);
>> +
>> + ? ? ? ? ? ? /* Execute queued descriptors */
>> + ? ? ? ? ? ? list_splice_tail_init(&schan->active, &schan->completed);
>> + ? ? ? ? ? ? if (!list_empty(&schan->queued))
>> + ? ? ? ? ? ? ? ? ? ? sirfsoc_dma_execute(schan);
>> +
>> + ? ? ? ? ? ? spin_unlock(&schan->lock);
>> + ? ? }
> Here you know which channel has triggered interrupt and you may pass
> this info to your tasklet and avoid scanning again there

ok. let me see.

>
>> +
>> + ? ? /* Schedule tasklet */
>> + ? ? tasklet_schedule(&sdma->tasklet);
>> +
>> + ? ? return IRQ_HANDLED;
>> +}
>> +
>> +/* process completed descriptors */
>> +static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
>> +{
>> + ? ? dma_cookie_t last_cookie = 0;
>> + ? ? struct sirfsoc_dma_chan *schan;
>> + ? ? struct sirfsoc_dma_desc *mdesc;
>> + ? ? struct dma_async_tx_descriptor *desc;
>> + ? ? unsigned long flags;
>> + ? ? LIST_HEAD(list);
>> + ? ? int i;
>> +
>> + ? ? for (i = 0; i < sdma->dma.chancnt; i++) {
>> + ? ? ? ? ? ? schan = &sdma->channels[i];
>> +
>> + ? ? ? ? ? ? /* Get all completed descriptors */
>> + ? ? ? ? ? ? spin_lock_irqsave(&schan->lock, flags);
> this will block interrupts, i dont see a reason why this should be used
> here??

OK, no IRQ is accessing the completed list.

>
>> + ? ? ? ? ? ? if (!list_empty(&schan->completed))
>> + ? ? ? ? ? ? ? ? ? ? list_splice_tail_init(&schan->completed, &list);
>> + ? ? ? ? ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> + ? ? ? ? ? ? if (list_empty(&list))
>> + ? ? ? ? ? ? ? ? ? ? continue;
>> +
>> + ? ? ? ? ? ? /* Execute callbacks and run dependencies */
>> + ? ? ? ? ? ? list_for_each_entry(mdesc, &list, node) {
>> + ? ? ? ? ? ? ? ? ? ? desc = &mdesc->desc;
>> +
>> + ? ? ? ? ? ? ? ? ? ? if (desc->callback)
>> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? desc->callback(desc->callback_param);
>> +
>> + ? ? ? ? ? ? ? ? ? ? last_cookie = desc->cookie;
>> + ? ? ? ? ? ? ? ? ? ? dma_run_dependencies(desc);
>> + ? ? ? ? ? ? }
>> +
>> + ? ? ? ? ? ? /* Free descriptors */
>> + ? ? ? ? ? ? spin_lock_irqsave(&schan->lock, flags);
>> + ? ? ? ? ? ? list_splice_tail_init(&list, &schan->free);
>> + ? ? ? ? ? ? schan->completed_cookie = last_cookie;
>> + ? ? ? ? ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> + ? ? }
>> +}
>> +
>> +/* DMA Tasklet */
>> +static void sirfsoc_dma_tasklet(unsigned long data)
>> +{
>> + ? ? struct sirfsoc_dma *sdma = (void *)data;
>> +
>> + ? ? sirfsoc_dma_process_completed(sdma);
>> +}
>> +
>> +/* Submit descriptor to hardware */
>> +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
>> +{
>> + ? ? struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
>> + ? ? struct sirfsoc_dma_desc *mdesc;
>> + ? ? unsigned long flags;
>> + ? ? dma_cookie_t cookie;
>> +
>> + ? ? mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
>> +
>> + ? ? spin_lock_irqsave(&schan->lock, flags);
>> +
>> + ? ? /* Move descriptor to queue */
>> + ? ? list_move_tail(&mdesc->node, &schan->queued);
>> +
>> + ? ? /* If channel is idle, execute all queued descriptors */
>> + ? ? if (list_empty(&schan->active))
>> + ? ? ? ? ? ? sirfsoc_dma_execute(schan);
> this is wrong, this should be done in .issue_pending

OK. As a reference I looked at several current drivers in drivers/dma, and
they do start the DMA in submit... I guess they are wrong too?

>
>> +
>> + ? ? /* Update cookie */
>> + ? ? cookie = schan->chan.cookie + 1;
>> + ? ? if (cookie <= 0)
>> + ? ? ? ? ? ? cookie = 1;
>> +
>> + ? ? schan->chan.cookie = cookie;
>> + ? ? mdesc->desc.cookie = cookie;
>> +
>> + ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> + ? ? return cookie;
>> +}
>> +
>> +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
>> + ? ? struct sirfsoc_dma_slave_config *config)
>> +{
>> + ? ? u32 addr, direction;
>> + ? ? unsigned long flags;
>> +
>> + ? ? switch (config->generic_config.direction) {
>> + ? ? case DMA_FROM_DEVICE:
>> + ? ? ? ? ? ? direction = 0;
>> + ? ? ? ? ? ? addr = config->generic_config.dst_addr;
>> + ? ? ? ? ? ? break;
>> +
>> + ? ? case DMA_TO_DEVICE:
>> + ? ? ? ? ? ? direction = 1;
>> + ? ? ? ? ? ? addr = config->generic_config.src_addr;
>> + ? ? ? ? ? ? break;
>> +
>> + ? ? default:
>> + ? ? ? ? ? ? return -EINVAL;
>> + ? ? }
>> +
>> + ? ? if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
>> + ? ? ? ? ? ? (config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
>> + ? ? ? ? ? ? return -EINVAL;
>> +
>> + ? ? spin_lock_irqsave(&schan->lock, flags);
>> + ? ? schan->addr = addr;
>> + ? ? schan->direction = direction;
>> + ? ? schan->xlen = config->xlen;
>> + ? ? schan->ylen = config->ylen;
>> + ? ? schan->width = config->width;
> what do these parameters mean, is width the dma fifo width, if so use
> existing members for that

The width is not the DMA FIFO width. The prima2 requires three parameters
to start a 2D DMA transfer; their relationship is shown below:

<------------------width------------------>
|------|--------------------------|-------|    ---
|      |                          |       |     ^
|      |<----------xlen---------->|       |     |
|      |                          |       |   ylen
|      |                          |       |     |
|------|--------------------------|-------|    _v_

After I get back to the office, I'll copy the details from the datasheet
for you.

>
>> + ? ? schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
>> + ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> + ? ? return 0;
>> +}
>> +
>> +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
>> +{
>> + ? ? struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
>> + ? ? int cid = schan->chan.chan_id;
>> + ? ? unsigned long flags;
>> +
>> + ? ? writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
>> + ? ? ? ? ? ? sdma->regs + SIRFSOC_DMA_INT_EN);
>> + ? ? writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
>> +
>> + ? ? spin_lock_irqsave(&schan->lock, flags);
>> +
>> + ? ? list_splice_tail_init(&schan->queued, &schan->free);
> what about active list
>> + ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> + ? ? return 0;
>> +}
>> +
>> +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
>> + ? ? unsigned long arg)
>> +{
>> + ? ? struct sirfsoc_dma_slave_config *config;
>> + ? ? struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +
>> + ? ? switch (cmd) {
>> + ? ? case DMA_TERMINATE_ALL:
>> + ? ? ? ? ? ? return sirfsoc_dma_terminate_all(schan);
>> + ? ? case DMA_SLAVE_CONFIG:
>> + ? ? ? ? ? ? config = (struct sirfsoc_dma_slave_config *)arg;
>> + ? ? ? ? ? ? return sirfsoc_dma_slave_config(schan, config);
>> +
>> + ? ? default:
>> + ? ? ? ? ? ? break;
>> + ? ? }
>> +
>> + ? ? return -ENOSYS;
>> +}
>> +
>> +/* Alloc channel resources */
>> +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
>> +{
>> + ? ? struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
>> + ? ? struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> + ? ? struct sirfsoc_dma_desc *mdesc;
>> + ? ? unsigned long flags;
>> + ? ? LIST_HEAD(descs);
>> + ? ? int i;
>> +
>> + ? ? /* Alloc descriptors for this channel */
>> + ? ? for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
>> + ? ? ? ? ? ? mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
>> + ? ? ? ? ? ? if (!mdesc) {
>> + ? ? ? ? ? ? ? ? ? ? dev_notice(sdma->dma.dev, "Memory allocation error. "
>> + ? ? ? ? ? ? ? ? ? ? ? ? ? ? "Allocated only %u descriptors\n", i);
>> + ? ? ? ? ? ? ? ? ? ? break;
>> + ? ? ? ? ? ? }
>> +
>> + ? ? ? ? ? ? dma_async_tx_descriptor_init(&mdesc->desc, chan);
>> + ? ? ? ? ? ? mdesc->desc.flags = DMA_CTRL_ACK;
>> + ? ? ? ? ? ? mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
>> +
>> + ? ? ? ? ? ? list_add_tail(&mdesc->node, &descs);
>> + ? ? }
>> +
>> + ? ? /* Return error only if no descriptors were allocated */
>> + ? ? if (i == 0)
>> + ? ? ? ? ? ? return -ENOMEM;
>> +
>> + ? ? spin_lock_irqsave(&schan->lock, flags);
>> +
>> + ? ? list_splice_tail_init(&descs, &schan->free);
>> + ? ? spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> + ? ? return 0;
>> +}
>> +
>> +/* Free channel resources */
>> +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
>> +{
>> + ? ? struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> + ? ? struct sirfsoc_dma_desc *mdesc, *tmp;
>> + ? ? unsigned long flags;
>> + ? ? LIST_HEAD(descs);
>> +
>> + ? ? spin_lock_irqsave(&schan->lock, flags);
>> +
>> + ? ? /* Channel must be idle */
>> + ? ? BUG_ON(!list_empty(&schan->prepared));
>> +     BUG_ON(!list_empty(&schan->queued));
>> +     BUG_ON(!list_empty(&schan->active));
>> +     BUG_ON(!list_empty(&schan->completed));
>> +
>> +     /* Move data */
>> +     list_splice_tail_init(&schan->free, &descs);
>> +
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     /* Free descriptors */
>> +     list_for_each_entry_safe(mdesc, tmp, &descs, node)
>> +             kfree(mdesc);
>> +}
>> +
>> +/* Send pending descriptor to hardware */
>> +static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     unsigned long flags;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +
>> +     if (list_empty(&schan->active) && !list_empty(&schan->queued))
>> +             sirfsoc_dma_execute(schan);
>> +
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +}
>> +
>> +/* Check request completion status */
>> +static enum dma_status
>> +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
>> +     struct dma_tx_state *txstate)
>> +{
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     unsigned long flags;
>> +     dma_cookie_t last_used;
>> +     dma_cookie_t last_complete;
>> +
>> +     spin_lock_irqsave(&schan->lock, flags);
>> +     last_used = schan->chan.cookie;
>> +     last_complete = schan->completed_cookie;
>> +     spin_unlock_irqrestore(&schan->lock, flags);
>> +
>> +     dma_set_tx_state(txstate, last_complete, last_used, 0);
>> +     return dma_async_is_complete(cookie, last_complete, last_used);
>> +}
>> +
>> +/* Prepare descriptor for memory to memory copy */
>> +static struct dma_async_tx_descriptor *
>> +sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
>> +     size_t len, unsigned long flags)
>> +{
>> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
>> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
>> +     struct sirfsoc_dma_desc *mdesc = NULL;
>> +     unsigned long iflags;
>> +
>> +     /* Get free descriptor */
>> +     spin_lock_irqsave(&schan->lock, iflags);
>> +     if (!list_empty(&schan->free)) {
>> +             mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
>> +                     node);
>> +             list_del(&mdesc->node);
>> +     }
>> +     spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +     if (!mdesc) {
>> +             /* try to free completed descriptors */
>> +             sirfsoc_dma_process_completed(sdma);
>> +             return NULL;
>> +     }
>> +
>> +     /* Place descriptor in prepared list */
>> +     spin_lock_irqsave(&schan->lock, iflags);
>> +     list_add_tail(&mdesc->node, &schan->prepared);
>> +     spin_unlock_irqrestore(&schan->lock, iflags);
>> +
>> +     return &mdesc->desc;
>> +}
>> +
>> +/*
>> + * The DMA controller consists of 16 independent DMA channels.
>> + * Each channel is allocated to a different function
>> + */
>> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
>> +{
>> +     unsigned int ch_nr = (unsigned int) chan_id;
>> +
>> +     if (ch_nr == chan->chan_id)
>> +             return true;
>> +
>> +     return false;
>> +}
>> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
>> +
>> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
>> +{
>> +     struct device_node *dn = op->dev.of_node;
>> +     struct device *dev = &op->dev;
>> +     struct dma_device *dma;
>> +     struct sirfsoc_dma *sdma;
>> +     struct sirfsoc_dma_chan *schan;
>> +     struct resource res;
>> +     ulong regs_start, regs_size;
>> +     u32 id;
>> +     int retval, i;
>> +
>> +     sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
>> +     if (!sdma) {
>> +             dev_err(dev, "Memory exhausted!\n");
>> +             return -ENOMEM;
>> +     }
>> +
>> +     if (of_property_read_u32(dn, "cell-index", &id)) {
>> +             dev_err(dev, "Fail to get DMAC index\n");
>> +             return -ENODEV;
>> +     }
>> +
>> +     sdma->irq = irq_of_parse_and_map(dn, 0);
>> +     if (sdma->irq == NO_IRQ) {
>> +             dev_err(dev, "Error mapping IRQ!\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     retval = of_address_to_resource(dn, 0, &res);
>> +     if (retval) {
>> +             dev_err(dev, "Error parsing memory region!\n");
>> +             return retval;
>> +     }
>> +
>> +     regs_start = res.start;
>> +     regs_size = resource_size(&res);
>> +
>> +     if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
>> +             dev_err(dev, "Error requesting memory region!\n");
>> +             return -EBUSY;
>> +     }
>> +
>> +     sdma->regs = devm_ioremap(dev, regs_start, regs_size);
>> +     if (!sdma->regs) {
>> +             dev_err(dev, "Error mapping memory region!\n");
>> +             return -ENOMEM;
>> +     }
>> +
>> +     retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
>> +             sdma);
>> +     if (retval) {
>> +             dev_err(dev, "Error requesting IRQ!\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     dma = &sdma->dma;
>> +     dma->dev = dev;
>> +     dma->chancnt = SIRFSOC_DMA_CHANNELS;
>> +
>> +     dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
>> +     dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
>> +     dma->device_issue_pending = sirfsoc_dma_issue_pending;
>> +     dma->device_control = sirfsoc_dma_control;
>> +     dma->device_tx_status = sirfsoc_dma_tx_status;
>> +     dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
>> +
>> +     INIT_LIST_HEAD(&dma->channels);
>> +     dma_cap_set(DMA_MEMCPY, dma->cap_mask);
> DMA_SLAVE as well..

ok.
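
Concretely, that follow-up amounts to advertising the slave capability next
to the memcpy one in probe() -- a sketch of the agreed change, not the posted
patch:

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);	/* slave transfers as well */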

>
>> +
>> +     for (i = 0; i < dma->chancnt; i++) {
>> +             schan = &sdma->channels[i];
>> +
>> +             schan->chan.device = dma;
>> +             schan->chan.chan_id = dma->chancnt * id + i;
>> +             schan->chan.cookie = 1;
>> +             schan->completed_cookie = schan->chan.cookie;
>> +
>> +             INIT_LIST_HEAD(&schan->free);
>> +             INIT_LIST_HEAD(&schan->prepared);
>> +             INIT_LIST_HEAD(&schan->queued);
>> +             INIT_LIST_HEAD(&schan->active);
>> +             INIT_LIST_HEAD(&schan->completed);
>> +
>> +             spin_lock_init(&schan->lock);
>> +             list_add_tail(&schan->chan.device_node, &dma->channels);
>> +     }
>> +
>> +     tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
>> +
>> +     /* Register DMA engine */
>> +     dev_set_drvdata(dev, sdma);
>> +     retval = dma_async_device_register(dma);
>> +     if (retval) {
>> +             devm_free_irq(dev, sdma->irq, sdma);
>> +             irq_dispose_mapping(sdma->irq);
>> +     }
>> +
>> +     return retval;
>> +}
>> +
>> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
>> +{
>> +     struct device *dev = &op->dev;
>> +     struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
>> +
>> +     dma_async_device_unregister(&sdma->dma);
>> +     devm_free_irq(dev, sdma->irq, sdma);
>> +     irq_dispose_mapping(sdma->irq);
>> +
>> +     return 0;
>> +}
>> +
>> +static struct of_device_id sirfsoc_dma_match[] = {
>> +     { .compatible = "sirf,prima2-dmac", },
>> +     {},
>> +};
>> +
>> +static struct platform_driver sirfsoc_dma_driver = {
>> +     .probe          = sirfsoc_dma_probe,
>> +     .remove         = __devexit_p(sirfsoc_dma_remove),
>> +     .driver = {
>> +             .name = DRV_NAME,
>> +             .owner = THIS_MODULE,
>> +             .of_match_table = sirfsoc_dma_match,
>> +     },
>> +};
>> +
>> +static int __init sirfsoc_dma_init(void)
>> +{
>> +     return platform_driver_register(&sirfsoc_dma_driver);
>> +}
>> +module_init(sirfsoc_dma_init);
>> +
>> +static void __exit sirfsoc_dma_exit(void)
>> +{
>> +     platform_driver_unregister(&sirfsoc_dma_driver);
>> +}
>> +module_exit(sirfsoc_dma_exit);
>> +
>> +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
>> +     "Barry Song <baohua.song@csr.com>");
>> +MODULE_DESCRIPTION("SIRFSOC DMA control driver");
>> +MODULE_LICENSE("GPL");
>> diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
>> new file mode 100644
>> index 0000000..75d2d86
>> --- /dev/null
>> +++ b/include/linux/sirfsoc_dma.h
>> @@ -0,0 +1,18 @@
>> +#ifndef _SIRFSOC_DMA_H_
>> +#define _SIRFSOC_DMA_H_
>> +/*
>> + * create a custom slave config struct for CSR SiRFprimaII and pass that,
>> + * and make dma_slave_config a member of that struct
>> + */
>> +struct sirfsoc_dma_slave_config {
>> +     struct dma_slave_config generic_config;
>> +
>> +     /* CSR SiRFprimaII 2D-DMA config */
>> +     int             xlen;           /* DMA xlen */
>> +     int             ylen;           /* DMA ylen */
> what lengths?
>
>> +     int             width;          /* DMA width */
>> +};
>> +
>> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
>> +
>> +#endif
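
For context, a client on this SoC would pick its fixed channel with the
exported filter and hand the 2-D parameters down through the wrapped slave
config. The sketch below only illustrates that flow using the names from the
patch above; the channel number and lengths are made-up example values and the
call sequence is an assumption, not something taken from this thread:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct sirfsoc_dma_slave_config sirf_cfg = {
		.generic_config = {
			/* memory-side address and maxburst omitted here */
			.direction      = DMA_TO_DEVICE,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		},
		.xlen  = 16,	/* example values only */
		.ylen  = 3,
		.width = 32,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* each channel serves one fixed peripheral, hence the filter id */
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)4);
	if (chan)
		chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					     (unsigned long)&sirf_cfg);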

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 16:46     ` Barry Song
@ 2011-09-07 18:09       ` Koul, Vinod
  -1 siblings, 0 replies; 84+ messages in thread
From: Koul, Vinod @ 2011-09-07 18:09 UTC (permalink / raw)
  To: 21cnbao, jassisinghbrar, linus.walleij
  Cc: Williams, Dan J, arnd, linux-kernel, workgroup.linux,
	rongjun.ying, Baohua.Song, linux-arm-kernel

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain; charset="utf-8", Size: 17735 bytes --]

On Thu, 2011-09-08 at 00:46 +0800, Barry Song wrote:
> Hi Vinod,
> thanks for your quick feedback.
>
> 2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> > On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
> >> From: Rongjun Ying <rongjun.ying@csr.com>
> >
> >> +config SIRF_DMA
> >> +     tristate "CSR SiRFprimaII DMA support"
> >> +     depends on ARCH_PRIMA2
> >> +     select DMA_ENGINE
> >> +     help
> >> +       Enable support for the CSR SiRFprimaII DMA engine.
> > How different is it from the other primacell based DMA drivers, and why
> > wouldn't it make sense to use/modify one of them?
>
> it is much different with primacell based DMA like pl080, pl330.
> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
> two scales X and Y and direct way to start and stop DMA.
> every channel has fixed function to serve only one perpheral. so you
> find we have a filter id.
okay, what do you mean by 2D mode? Is it similar to what the TI folks, Linus
W and Jassi Brar posted RFCs on?
IIRC, the other PrimeCell DMACs do have this capability, but it is not
supported by the drivers, so I think this could be added to the current
drivers; Jassi and Linus W can comment better...

[snip]

>> +
> >> +/* Submit descriptor to hardware */
> >> +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
> >> +     struct sirfsoc_dma_desc *mdesc;
> >> +     unsigned long flags;
> >> +     dma_cookie_t cookie;
> >> +
> >> +     mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     /* Move descriptor to queue */
> >> +     list_move_tail(&mdesc->node, &schan->queued);
> >> +
> >> +     /* If channel is idle, execute all queued descriptors */
> >> +     if (list_empty(&schan->active))
> >> +             sirfsoc_dma_execute(schan);
> > this is wrong, this should be done in .issue_pending
>
> ok. as i reference several current drivers in drivers/dma, they are
> doing dma  start in submit....guess they are wrong too?
Right, please see Documentation/dmaengine.txt.
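
For reference, the split being asked for looks roughly like this: tx_submit
only moves the descriptor to the queued list and assigns a cookie, and
issue_pending is the only place the hardware is started. This reuses the names
from the patch above and is a sketch, not the code that was eventually merged:

	static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
	{
		struct sirfsoc_dma_chan *schan =
			dma_chan_to_sirfsoc_dma_chan(txd->chan);
		struct sirfsoc_dma_desc *mdesc =
			container_of(txd, struct sirfsoc_dma_desc, desc);
		unsigned long flags;
		dma_cookie_t cookie;

		spin_lock_irqsave(&schan->lock, flags);

		/* queue only; do not touch the hardware here */
		list_move_tail(&mdesc->node, &schan->queued);

		cookie = schan->chan.cookie + 1;
		if (cookie <= 0)
			cookie = 1;
		schan->chan.cookie = cookie;
		mdesc->desc.cookie = cookie;

		spin_unlock_irqrestore(&schan->lock, flags);
		return cookie;
	}

	static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
	{
		struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
		unsigned long flags;

		spin_lock_irqsave(&schan->lock, flags);
		/* the hardware is only kicked from here */
		if (list_empty(&schan->active) && !list_empty(&schan->queued))
			sirfsoc_dma_execute(schan);
		spin_unlock_irqrestore(&schan->lock, flags);
	}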

>
> >
> >> +
> >> +     /* Update cookie */
> >> +     cookie = schan->chan.cookie + 1;
> >> +     if (cookie <= 0)
> >> +             cookie = 1;
> >> +
> >> +     schan->chan.cookie = cookie;
> >> +     mdesc->desc.cookie = cookie;
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return cookie;
> >> +}
> >> +
> >> +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
> >> +     struct sirfsoc_dma_slave_config *config)
> >> +{
> >> +     u32 addr, direction;
> >> +     unsigned long flags;
> >> +
> >> +     switch (config->generic_config.direction) {
> >> +     case DMA_FROM_DEVICE:
> >> +             direction = 0;
> >> +             addr = config->generic_config.dst_addr;
> >> +             break;
> >> +
> >> +     case DMA_TO_DEVICE:
> >> +             direction = 1;
> >> +             addr = config->generic_config.src_addr;
> >> +             break;
> >> +
> >> +     default:
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
> >> +             (config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
> >> +             return -EINVAL;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +     schan->addr = addr;
> >> +     schan->direction = direction;
> >> +     schan->xlen = config->xlen;
> >> +     schan->ylen = config->ylen;
> >> +     schan->width = config->width;
> > what do these parameters mean, is width the dma fifo width, if so use
> > existing members for that
>
> the width is not dma fifo width. prima2 required 3 parameters to begin
> a 2D dma transfer, the relationship is as below:
>
> <----------------width----------------->
> |-------|----------------------|-------|    ---
> |       |                      |       |     ^
> |       |<--------xlen-------->|       |     |
> |       |                      |       |   ylen
> |       |                      |       |     |
> |-------|----------------------|-------|    _v_
>
> after i go back to office, i'll copy details from datasheet to you.
>
> >
> >> +     schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
> >> +     int cid = schan->chan.chan_id;
> >> +     unsigned long flags;
> >> +
> >> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
> >> +             sdma->regs + SIRFSOC_DMA_INT_EN);
> >> +     writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     list_splice_tail_init(&schan->queued, &schan->free);
> > what about active list
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
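
A fix along the lines of that comment would also hand the active descriptors
back to the free list before the terminate returns -- roughly the following,
as a sketch rather than the posted code:

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);
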
> >> +
> >> +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> >> +     unsigned long arg)
> >> +{
> >> +     struct sirfsoc_dma_slave_config *config;
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +
> >> +     switch (cmd) {
> >> +     case DMA_TERMINATE_ALL:
> >> +             return sirfsoc_dma_terminate_all(schan);
> >> +     case DMA_SLAVE_CONFIG:
> >> +             config = (struct sirfsoc_dma_slave_config *)arg;
> >> +             return sirfsoc_dma_slave_config(schan, config);
> >> +
> >> +     default:
> >> +             break;
> >> +     }
> >> +
> >> +     return -ENOSYS;
> >> +}
> >> +
> >> +/* Alloc channel resources */
> >> +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc;
> >> +     unsigned long flags;
> >> +     LIST_HEAD(descs);
> >> +     int i;
> >> +
> >> +     /* Alloc descriptors for this channel */
> >> +     for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
> >> +             mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
> >> +             if (!mdesc) {
> >> +                     dev_notice(sdma->dma.dev, "Memory allocation error. "
> >> +                             "Allocated only %u descriptors\n", i);
> >> +                     break;
> >> +             }
> >> +
> >> +             dma_async_tx_descriptor_init(&mdesc->desc, chan);
> >> +             mdesc->desc.flags = DMA_CTRL_ACK;
> >> +             mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
> >> +
> >> +             list_add_tail(&mdesc->node, &descs);
> >> +     }
> >> +
> >> +     /* Return error only if no descriptors were allocated */
> >> +     if (i == 0)
> >> +             return -ENOMEM;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     list_splice_tail_init(&descs, &schan->free);
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +/* Free channel resources */
> >> +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc, *tmp;
> >> +     unsigned long flags;
> >> +     LIST_HEAD(descs);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     /* Channel must be idle */
> >> +     BUG_ON(!list_empty(&schan->prepared));
> >> +     BUG_ON(!list_empty(&schan->queued));
> >> +     BUG_ON(!list_empty(&schan->active));
> >> +     BUG_ON(!list_empty(&schan->completed));
> >> +
> >> +     /* Move data */
> >> +     list_splice_tail_init(&schan->free, &descs);
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     /* Free descriptors */
> >> +     list_for_each_entry_safe(mdesc, tmp, &descs, node)
> >> +             kfree(mdesc);
> >> +}
> >> +
> >> +/* Send pending descriptor to hardware */
> >> +static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     unsigned long flags;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     if (list_empty(&schan->active) && !list_empty(&schan->queued))
> >> +             sirfsoc_dma_execute(schan);
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +}
> >> +
> >> +/* Check request completion status */
> >> +static enum dma_status
> >> +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
> >> +     struct dma_tx_state *txstate)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     unsigned long flags;
> >> +     dma_cookie_t last_used;
> >> +     dma_cookie_t last_complete;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +     last_used = schan->chan.cookie;
> >> +     last_complete = schan->completed_cookie;
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     dma_set_tx_state(txstate, last_complete, last_used, 0);
> >> +     return dma_async_is_complete(cookie, last_complete, last_used);
> >> +}
> >> +
> >> +/* Prepare descriptor for memory to memory copy */
> >> +static struct dma_async_tx_descriptor *
> >> +sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
> >> +     size_t len, unsigned long flags)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc = NULL;
> >> +     unsigned long iflags;
> >> +
> >> +     /* Get free descriptor */
> >> +     spin_lock_irqsave(&schan->lock, iflags);
> >> +     if (!list_empty(&schan->free)) {
> >> +             mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
> >> +                     node);
> >> +             list_del(&mdesc->node);
> >> +     }
> >> +     spin_unlock_irqrestore(&schan->lock, iflags);
> >> +
> >> +     if (!mdesc) {
> >> +             /* try to free completed descriptors */
> >> +             sirfsoc_dma_process_completed(sdma);
> >> +             return NULL;
> >> +     }
> >> +
> >> +     /* Place descriptor in prepared list */
> >> +     spin_lock_irqsave(&schan->lock, iflags);
> >> +     list_add_tail(&mdesc->node, &schan->prepared);
> >> +     spin_unlock_irqrestore(&schan->lock, iflags);
> >> +
> >> +     return &mdesc->desc;
> >> +}
> >> +
> >> +/*
> >> + * The DMA controller consists of 16 independent DMA channels.
> >> + * Each channel is allocated to a different function
> >> + */
> >> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
> >> +{
> >> +     unsigned int ch_nr = (unsigned int) chan_id;
> >> +
> >> +     if (ch_nr == chan->chan_id)
> >> +             return true;
> >> +
> >> +     return false;
> >> +}
> >> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
> >> +
> >> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
> >> +{
> >> +     struct device_node *dn = op->dev.of_node;
> >> +     struct device *dev = &op->dev;
> >> +     struct dma_device *dma;
> >> +     struct sirfsoc_dma *sdma;
> >> +     struct sirfsoc_dma_chan *schan;
> >> +     struct resource res;
> >> +     ulong regs_start, regs_size;
> >> +     u32 id;
> >> +     int retval, i;
> >> +
> >> +     sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
> >> +     if (!sdma) {
> >> +             dev_err(dev, "Memory exhausted!\n");
> >> +             return -ENOMEM;
> >> +     }
> >> +
> >> +     if (of_property_read_u32(dn, "cell-index", &id)) {
> >> +             dev_err(dev, "Fail to get DMAC index\n");
> >> +             return -ENODEV;
> >> +     }
> >> +
> >> +     sdma->irq = irq_of_parse_and_map(dn, 0);
> >> +     if (sdma->irq == NO_IRQ) {
> >> +             dev_err(dev, "Error mapping IRQ!\n");
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     retval = of_address_to_resource(dn, 0, &res);
> >> +     if (retval) {
> >> +             dev_err(dev, "Error parsing memory region!\n");
> >> +             return retval;
> >> +     }
> >> +
> >> +     regs_start = res.start;
> >> +     regs_size = resource_size(&res);
> >> +
> >> +     if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
> >> +             dev_err(dev, "Error requesting memory region!\n");
> >> +             return -EBUSY;
> >> +     }
> >> +
> >> +     sdma->regs = devm_ioremap(dev, regs_start, regs_size);
> >> +     if (!sdma->regs) {
> >> +             dev_err(dev, "Error mapping memory region!\n");
> >> +             return -ENOMEM;
> >> +     }
> >> +
> >> +     retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
> >> +             sdma);
> >> +     if (retval) {
> >> +             dev_err(dev, "Error requesting IRQ!\n");
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     dma = &sdma->dma;
> >> +     dma->dev = dev;
> >> +     dma->chancnt = SIRFSOC_DMA_CHANNELS;
> >> +
> >> +     dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
> >> +     dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
> >> +     dma->device_issue_pending = sirfsoc_dma_issue_pending;
> >> +     dma->device_control = sirfsoc_dma_control;
> >> +     dma->device_tx_status = sirfsoc_dma_tx_status;
> >> +     dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
> >> +
> >> +     INIT_LIST_HEAD(&dma->channels);
> >> +     dma_cap_set(DMA_MEMCPY, dma->cap_mask);
> > DMA_SLAVE as well..
>
> ok.
>
> >
> >> +
> >> +     for (i = 0; i < dma->chancnt; i++) {
> >> +             schan = &sdma->channels[i];
> >> +
> >> +             schan->chan.device = dma;
> >> +             schan->chan.chan_id = dma->chancnt * id + i;
> >> +             schan->chan.cookie = 1;
> >> +             schan->completed_cookie = schan->chan.cookie;
> >> +
> >> +             INIT_LIST_HEAD(&schan->free);
> >> +             INIT_LIST_HEAD(&schan->prepared);
> >> +             INIT_LIST_HEAD(&schan->queued);
> >> +             INIT_LIST_HEAD(&schan->active);
> >> +             INIT_LIST_HEAD(&schan->completed);
> >> +
> >> +             spin_lock_init(&schan->lock);
> >> +             list_add_tail(&schan->chan.device_node, &dma->channels);
> >> +     }
> >> +
> >> +     tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
> >> +
> >> +     /* Register DMA engine */
> >> +     dev_set_drvdata(dev, sdma);
> >> +     retval = dma_async_device_register(dma);
> >> +     if (retval) {
> >> +             devm_free_irq(dev, sdma->irq, sdma);
> >> +             irq_dispose_mapping(sdma->irq);
> >> +     }
> >> +
> >> +     return retval;
> >> +}
> >> +
> >> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
> >> +{
> >> +     struct device *dev = &op->dev;
> >> +     struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
> >> +
> >> +     dma_async_device_unregister(&sdma->dma);
> >> +     devm_free_irq(dev, sdma->irq, sdma);
> >> +     irq_dispose_mapping(sdma->irq);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +static struct of_device_id sirfsoc_dma_match[] = {
> >> +     { .compatible = "sirf,prima2-dmac", },
> >> +     {},
> >> +};
> >> +
> >> +static struct platform_driver sirfsoc_dma_driver = {
> >> +     .probe          = sirfsoc_dma_probe,
> >> +     .remove         = __devexit_p(sirfsoc_dma_remove),
> >> +     .driver = {
> >> +             .name = DRV_NAME,
> >> +             .owner = THIS_MODULE,
> >> +             .of_match_table = sirfsoc_dma_match,
> >> +     },
> >> +};
> >> +
> >> +static int __init sirfsoc_dma_init(void)
> >> +{
> >> +     return platform_driver_register(&sirfsoc_dma_driver);
> >> +}
> >> +module_init(sirfsoc_dma_init);
> >> +
> >> +static void __exit sirfsoc_dma_exit(void)
> >> +{
> >> +     platform_driver_unregister(&sirfsoc_dma_driver);
> >> +}
> >> +module_exit(sirfsoc_dma_exit);
> >> +
> >> +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
> >> +     "Barry Song <baohua.song@csr.com>");
> >> +MODULE_DESCRIPTION("SIRFSOC DMA control driver");
> >> +MODULE_LICENSE("GPL");
> >> diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
> >> new file mode 100644
> >> index 0000000..75d2d86
> >> --- /dev/null
> >> +++ b/include/linux/sirfsoc_dma.h
> >> @@ -0,0 +1,18 @@
> >> +#ifndef _SIRFSOC_DMA_H_
> >> +#define _SIRFSOC_DMA_H_
> >> +/*
> >> + * create a custom slave config struct for CSR SiRFprimaII and pass that,
> >> + * and make dma_slave_config a member of that struct
> >> + */
> >> +struct sirfsoc_dma_slave_config {
> >> +     struct dma_slave_config generic_config;
> >> +
> >> +     /* CSR SiRFprimaII 2D-DMA config */
> >> +     int             xlen;           /* DMA xlen */
> >> +     int             ylen;           /* DMA ylen */
> > what lengths?
> >
> >> +     int             width;          /* DMA width */
> >> +};
> >> +
> >> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
> >> +
> >> +#endif
>
> Thanks
> barry
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

--
~Vinod

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-07 18:09       ` Koul, Vinod
  0 siblings, 0 replies; 84+ messages in thread
From: Koul, Vinod @ 2011-09-07 18:09 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, 2011-09-08 at 00:46 +0800, Barry Song wrote:
> Hi Vinod,
> thanks for your quick feedback.
>
> 2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> > On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
> >> From: Rongjun Ying <rongjun.ying@csr.com>
> >
> >> +config SIRF_DMA
> >> +     tristate "CSR SiRFprimaII DMA support"
> >> +     depends on ARCH_PRIMA2
> >> +     select DMA_ENGINE
> >> +     help
> >> +       Enable support for the CSR SiRFprimaII DMA engine.
> > How different is it from the other primacell based DMA drivers, and why
> > wouldn't it make sense to use/modify one of them?
>
> it is much different with primacell based DMA like pl080, pl330.
> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
> two scales X and Y and direct way to start and stop DMA.
> every channel has fixed function to serve only one perpheral. so you
> find we have a filter id.
okay, what do you mean by 2D mode? Is it similar to what the TI folks, Linus
W and Jassi Brar posted RFCs on?
IIRC, the other PrimeCell DMACs do have this capability, but it is not
supported by the drivers, so I think this could be added to the current
drivers; Jassi and Linus W can comment better...

[snip]

>> +
> >> +/* Submit descriptor to hardware */
> >> +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
> >> +     struct sirfsoc_dma_desc *mdesc;
> >> +     unsigned long flags;
> >> +     dma_cookie_t cookie;
> >> +
> >> +     mdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     /* Move descriptor to queue */
> >> +     list_move_tail(&mdesc->node, &schan->queued);
> >> +
> >> +     /* If channel is idle, execute all queued descriptors */
> >> +     if (list_empty(&schan->active))
> >> +             sirfsoc_dma_execute(schan);
> > this is wrong, this should be done in .issue_pending
>
> ok. as i reference several current drivers in drivers/dma, they are
> doing dma  start in submit....guess they are wrong too?
Right, please see Documentation/dmaengine.txt.

>
> >
> >> +
> >> +     /* Update cookie */
> >> +     cookie = schan->chan.cookie + 1;
> >> +     if (cookie <= 0)
> >> +             cookie = 1;
> >> +
> >> +     schan->chan.cookie = cookie;
> >> +     mdesc->desc.cookie = cookie;
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return cookie;
> >> +}
> >> +
> >> +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
> >> +     struct sirfsoc_dma_slave_config *config)
> >> +{
> >> +     u32 addr, direction;
> >> +     unsigned long flags;
> >> +
> >> +     switch (config->generic_config.direction) {
> >> +     case DMA_FROM_DEVICE:
> >> +             direction = 0;
> >> +             addr = config->generic_config.dst_addr;
> >> +             break;
> >> +
> >> +     case DMA_TO_DEVICE:
> >> +             direction = 1;
> >> +             addr = config->generic_config.src_addr;
> >> +             break;
> >> +
> >> +     default:
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     if ((config->generic_config.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
> >> +             (config->generic_config.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
> >> +             return -EINVAL;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +     schan->addr = addr;
> >> +     schan->direction = direction;
> >> +     schan->xlen = config->xlen;
> >> +     schan->ylen = config->ylen;
> >> +     schan->width = config->width;
> > what do these parameters mean, is width the dma fifo width, if so use
> > existing members for that
>
> the width is not dma fifo width. prima2 required 3 parameters to begin
> a 2D dma transfer, the relationship is as below:
>
> <----------------width----------------->
> |-------|----------------------|-------|    ---
> |       |                      |       |     ^
> |       |<--------xlen-------->|       |     |
> |       |                      |       |   ylen
> |       |                      |       |     |
> |-------|----------------------|-------|    _v_
>
> after i go back to office, i'll copy details from datasheet to you.
>
> >
> >> +     schan->mode = (config->generic_config.src_maxburst == 4 ? 1 : 0);
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
> >> +     int cid = schan->chan.chan_id;
> >> +     unsigned long flags;
> >> +
> >> +     writel_relaxed(readl_relaxed(sdma->regs + SIRFSOC_DMA_INT_EN) & ~(1 << cid),
> >> +             sdma->regs + SIRFSOC_DMA_INT_EN);
> >> +     writel_relaxed(1 << cid, sdma->regs + SIRFSOC_DMA_CH_VALID);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     list_splice_tail_init(&schan->queued, &schan->free);
> > what about active list
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> >> +     unsigned long arg)
> >> +{
> >> +     struct sirfsoc_dma_slave_config *config;
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +
> >> +     switch (cmd) {
> >> +     case DMA_TERMINATE_ALL:
> >> +             return sirfsoc_dma_terminate_all(schan);
> >> +     case DMA_SLAVE_CONFIG:
> >> +             config = (struct sirfsoc_dma_slave_config *)arg;
> >> +             return sirfsoc_dma_slave_config(schan, config);
> >> +
> >> +     default:
> >> +             break;
> >> +     }
> >> +
> >> +     return -ENOSYS;
> >> +}
> >> +
> >> +/* Alloc channel resources */
> >> +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc;
> >> +     unsigned long flags;
> >> +     LIST_HEAD(descs);
> >> +     int i;
> >> +
> >> +     /* Alloc descriptors for this channel */
> >> +     for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
> >> +             mdesc = kzalloc(sizeof(struct sirfsoc_dma_desc), GFP_KERNEL);
> >> +             if (!mdesc) {
> >> +                     dev_notice(sdma->dma.dev, "Memory allocation error. "
> >> +                             "Allocated only %u descriptors\n", i);
> >> +                     break;
> >> +             }
> >> +
> >> +             dma_async_tx_descriptor_init(&mdesc->desc, chan);
> >> +             mdesc->desc.flags = DMA_CTRL_ACK;
> >> +             mdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
> >> +
> >> +             list_add_tail(&mdesc->node, &descs);
> >> +     }
> >> +
> >> +     /* Return error only if no descriptors were allocated */
> >> +     if (i == 0)
> >> +             return -ENOMEM;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     list_splice_tail_init(&descs, &schan->free);
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +/* Free channel resources */
> >> +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc, *tmp;
> >> +     unsigned long flags;
> >> +     LIST_HEAD(descs);
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     /* Channel must be idle */
> >> +     BUG_ON(!list_empty(&schan->prepared));
> >> +     BUG_ON(!list_empty(&schan->queued));
> >> +     BUG_ON(!list_empty(&schan->active));
> >> +     BUG_ON(!list_empty(&schan->completed));
> >> +
> >> +     /* Move data */
> >> +     list_splice_tail_init(&schan->free, &descs);
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     /* Free descriptors */
> >> +     list_for_each_entry_safe(mdesc, tmp, &descs, node)
> >> +             kfree(mdesc);
> >> +}
> >> +
> >> +/* Send pending descriptor to hardware */
> >> +static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     unsigned long flags;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +
> >> +     if (list_empty(&schan->active) && !list_empty(&schan->queued))
> >> +             sirfsoc_dma_execute(schan);
> >> +
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +}
> >> +
> >> +/* Check request completion status */
> >> +static enum dma_status
> >> +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
> >> +     struct dma_tx_state *txstate)
> >> +{
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     unsigned long flags;
> >> +     dma_cookie_t last_used;
> >> +     dma_cookie_t last_complete;
> >> +
> >> +     spin_lock_irqsave(&schan->lock, flags);
> >> +     last_used = schan->chan.cookie;
> >> +     last_complete = schan->completed_cookie;
> >> +     spin_unlock_irqrestore(&schan->lock, flags);
> >> +
> >> +     dma_set_tx_state(txstate, last_complete, last_used, 0);
> >> +     return dma_async_is_complete(cookie, last_complete, last_used);
> >> +}
> >> +
> >> +/* Prepare descriptor for memory to memory copy */
> >> +static struct dma_async_tx_descriptor *
> >> +sirfsoc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
> >> +     size_t len, unsigned long flags)
> >> +{
> >> +     struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
> >> +     struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
> >> +     struct sirfsoc_dma_desc *mdesc = NULL;
> >> +     unsigned long iflags;
> >> +
> >> +     /* Get free descriptor */
> >> +     spin_lock_irqsave(&schan->lock, iflags);
> >> +     if (!list_empty(&schan->free)) {
> >> +             mdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
> >> +                     node);
> >> +             list_del(&mdesc->node);
> >> +     }
> >> +     spin_unlock_irqrestore(&schan->lock, iflags);
> >> +
> >> +     if (!mdesc) {
> >> +             /* try to free completed descriptors */
> >> +             sirfsoc_dma_process_completed(sdma);
> >> +             return NULL;
> >> +     }
> >> +
> >> +     /* Place descriptor in prepared list */
> >> +     spin_lock_irqsave(&schan->lock, iflags);
> >> +     list_add_tail(&mdesc->node, &schan->prepared);
> >> +     spin_unlock_irqrestore(&schan->lock, iflags);
> >> +
> >> +     return &mdesc->desc;
> >> +}
> >> +
> >> +/*
> >> + * The DMA controller consists of 16 independent DMA channels.
> >> + * Each channel is allocated to a different function
> >> + */
> >> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
> >> +{
> >> +     unsigned int ch_nr = (unsigned int) chan_id;
> >> +
> >> +     if (ch_nr == chan->chan_id)
> >> +             return true;
> >> +
> >> +     return false;
> >> +}
> >> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
> >> +
> >> +static int __devinit sirfsoc_dma_probe(struct platform_device *op)
> >> +{
> >> +     struct device_node *dn = op->dev.of_node;
> >> +     struct device *dev = &op->dev;
> >> +     struct dma_device *dma;
> >> +     struct sirfsoc_dma *sdma;
> >> +     struct sirfsoc_dma_chan *schan;
> >> +     struct resource res;
> >> +     ulong regs_start, regs_size;
> >> +     u32 id;
> >> +     int retval, i;
> >> +
> >> +     sdma = devm_kzalloc(dev, sizeof(struct sirfsoc_dma), GFP_KERNEL);
> >> +     if (!sdma) {
> >> +             dev_err(dev, "Memory exhausted!\n");
> >> +             return -ENOMEM;
> >> +     }
> >> +
> >> +     if (of_property_read_u32(dn, "cell-index", &id)) {
> >> +             dev_err(dev, "Fail to get DMAC index\n");
> >> +             return -ENODEV;
> >> +     }
> >> +
> >> +     sdma->irq = irq_of_parse_and_map(dn, 0);
> >> +     if (sdma->irq == NO_IRQ) {
> >> +             dev_err(dev, "Error mapping IRQ!\n");
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     retval = of_address_to_resource(dn, 0, &res);
> >> +     if (retval) {
> >> +             dev_err(dev, "Error parsing memory region!\n");
> >> +             return retval;
> >> +     }
> >> +
> >> +     regs_start = res.start;
> >> +     regs_size = resource_size(&res);
> >> +
> >> +     if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
> >> +             dev_err(dev, "Error requesting memory region!\n");
> >> +             return -EBUSY;
> >> +     }
> >> +
> >> +     sdma->regs = devm_ioremap(dev, regs_start, regs_size);
> >> +     if (!sdma->regs) {
> >> +             dev_err(dev, "Error mapping memory region!\n");
> >> +             return -ENOMEM;
> >> +     }
> >> +
> >> +     retval = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
> >> +             sdma);
> >> +     if (retval) {
> >> +             dev_err(dev, "Error requesting IRQ!\n");
> >> +             return -EINVAL;
> >> +     }
> >> +
> >> +     dma = &sdma->dma;
> >> +     dma->dev = dev;
> >> +     dma->chancnt = SIRFSOC_DMA_CHANNELS;
> >> +
> >> +     dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
> >> +     dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
> >> +     dma->device_issue_pending = sirfsoc_dma_issue_pending;
> >> +     dma->device_control = sirfsoc_dma_control;
> >> +     dma->device_tx_status = sirfsoc_dma_tx_status;
> >> +     dma->device_prep_dma_memcpy = sirfsoc_dma_prep_memcpy;
> >> +
> >> +     INIT_LIST_HEAD(&dma->channels);
> >> +     dma_cap_set(DMA_MEMCPY, dma->cap_mask);
> > DMA_SLAVE as well..
>
> ok.
>
> >
> >> +
> >> +     for (i = 0; i < dma->chancnt; i++) {
> >> +             schan = &sdma->channels[i];
> >> +
> >> +             schan->chan.device = dma;
> >> +             schan->chan.chan_id = dma->chancnt * id + i;
> >> +             schan->chan.cookie = 1;
> >> +             schan->completed_cookie = schan->chan.cookie;
> >> +
> >> +             INIT_LIST_HEAD(&schan->free);
> >> +             INIT_LIST_HEAD(&schan->prepared);
> >> +             INIT_LIST_HEAD(&schan->queued);
> >> +             INIT_LIST_HEAD(&schan->active);
> >> +             INIT_LIST_HEAD(&schan->completed);
> >> +
> >> +             spin_lock_init(&schan->lock);
> >> +             list_add_tail(&schan->chan.device_node, &dma->channels);
> >> +     }
> >> +
> >> +     tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
> >> +
> >> +     /* Register DMA engine */
> >> +     dev_set_drvdata(dev, sdma);
> >> +     retval = dma_async_device_register(dma);
> >> +     if (retval) {
> >> +             devm_free_irq(dev, sdma->irq, sdma);
> >> +             irq_dispose_mapping(sdma->irq);
> >> +     }
> >> +
> >> +     return retval;
> >> +}
> >> +
> >> +static int __devexit sirfsoc_dma_remove(struct platform_device *op)
> >> +{
> >> +     struct device *dev = &op->dev;
> >> +     struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
> >> +
> >> +     dma_async_device_unregister(&sdma->dma);
> >> +     devm_free_irq(dev, sdma->irq, sdma);
> >> +     irq_dispose_mapping(sdma->irq);
> >> +
> >> +     return 0;
> >> +}
> >> +
> >> +static struct of_device_id sirfsoc_dma_match[] = {
> >> +     { .compatible = "sirf,prima2-dmac", },
> >> +     {},
> >> +};
> >> +
> >> +static struct platform_driver sirfsoc_dma_driver = {
> >> +     .probe          = sirfsoc_dma_probe,
> >> +     .remove         = __devexit_p(sirfsoc_dma_remove),
> >> +     .driver = {
> >> +             .name = DRV_NAME,
> >> +             .owner = THIS_MODULE,
> >> +             .of_match_table = sirfsoc_dma_match,
> >> +     },
> >> +};
> >> +
> >> +static int __init sirfsoc_dma_init(void)
> >> +{
> >> +     return platform_driver_register(&sirfsoc_dma_driver);
> >> +}
> >> +module_init(sirfsoc_dma_init);
> >> +
> >> +static void __exit sirfsoc_dma_exit(void)
> >> +{
> >> +     platform_driver_unregister(&sirfsoc_dma_driver);
> >> +}
> >> +module_exit(sirfsoc_dma_exit);
> >> +
> >> +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
> >> +     "Barry Song <baohua.song@csr.com>");
> >> +MODULE_DESCRIPTION("SIRFSOC DMA control driver");
> >> +MODULE_LICENSE("GPL");
> >> diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
> >> new file mode 100644
> >> index 0000000..75d2d86
> >> --- /dev/null
> >> +++ b/include/linux/sirfsoc_dma.h
> >> @@ -0,0 +1,18 @@
> >> +#ifndef _SIRFSOC_DMA_H_
> >> +#define _SIRFSOC_DMA_H_
> >> +/*
> >> + * create a custom slave config struct for CSR SiRFprimaII and pass that,
> >> + * and make dma_slave_config a member of that struct
> >> + */
> >> +struct sirfsoc_dma_slave_config {
> >> +     struct dma_slave_config generic_config;
> >> +
> >> +     /* CSR SiRFprimaII 2D-DMA config */
> >> +     int             xlen;           /* DMA xlen */
> >> +     int             ylen;           /* DMA ylen */
> > what lengths?
> >
> >> +     int             width;          /* DMA width */
> >> +};
> >> +
> >> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
> >> +
> >> +#endif
>
> Thanks
> barry
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel at lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

--
~Vinod

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 16:14   ` Koul, Vinod
@ 2011-09-07 19:27     ` Linus Walleij
  -1 siblings, 0 replies; 84+ messages in thread
From: Linus Walleij @ 2011-09-07 19:27 UTC (permalink / raw)
  To: Koul, Vinod
  Cc: Baohua.Song, arnd, linux-kernel, workgroup.linux, rongjun.ying,
	Williams, Dan J, linux-arm-kernel

2011/9/7 Koul, Vinod <vinod.koul@intel.com>:
> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>
>> +config SIRF_DMA
>> +     tristate "CSR SiRFprimaII DMA support"
>> +     depends on ARCH_PRIMA2
>> +     select DMA_ENGINE
>> +     help
>> +       Enable support for the CSR SiRFprimaII DMA engine.
>
> How different is it from the other primacell based DMA drivers, and why
> wouldn't it make sense to use/modify one of them?

Hehe it looks to me like primall has no relation to PrimeCell, just
confusingly similar names :-)

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-07 19:27     ` Linus Walleij
  0 siblings, 0 replies; 84+ messages in thread
From: Linus Walleij @ 2011-09-07 19:27 UTC (permalink / raw)
  To: linux-arm-kernel

2011/9/7 Koul, Vinod <vinod.koul@intel.com>:
> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>
>> +config SIRF_DMA
>> +     tristate "CSR SiRFprimaII DMA support"
>> +     depends on ARCH_PRIMA2
>> +     select DMA_ENGINE
>> +     help
>> +       Enable support for the CSR SiRFprimaII DMA engine.
>
> How different is it from the other primacell based DMA drivers, and why
> wouldn't it make sense to use/modify one of them?

Hehe it looks to me like primall has no relation to PrimeCell, just
confusingly similar names :-)

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 19:27     ` Linus Walleij
@ 2011-09-08  1:47       ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  1:47 UTC (permalink / raw)
  To: Linus Walleij
  Cc: Koul, Vinod, arnd, linux-kernel, workgroup.linux, rongjun.ying,
	Baohua.Song, Williams, Dan J, linux-arm-kernel

2011/9/8 Linus Walleij <linus.ml.walleij@gmail.com>:
> 2011/9/7 Koul, Vinod <vinod.koul@intel.com>:
>> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>>
>>> +config SIRF_DMA
>>> +     tristate "CSR SiRFprimaII DMA support"
>>> +     depends on ARCH_PRIMA2
>>> +     select DMA_ENGINE
>>> +     help
>>> +       Enable support for the CSR SiRFprimaII DMA engine.
>>
>> How different is it from the other primacell based DMA drivers, and why
>> wouldn't it make sense to use/modify one of them?
>
> Hehe it looks to me like primall has no relation to PrimeCell, just
> confusingly similar names :-)

yes. only with the word "prima", which is the mach name. it has
nothing to do with primecell.
>
> Yours,
> Linus Walleij
-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-08  1:47       ` Barry Song
  0 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  1:47 UTC (permalink / raw)
  To: linux-arm-kernel

2011/9/8 Linus Walleij <linus.ml.walleij@gmail.com>:
> 2011/9/7 Koul, Vinod <vinod.koul@intel.com>:
>> On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>>
>>> +config SIRF_DMA
>>> +     tristate "CSR SiRFprimaII DMA support"
>>> +     depends on ARCH_PRIMA2
>>> +     select DMA_ENGINE
>>> +     help
>>> +       Enable support for the CSR SiRFprimaII DMA engine.
>>
>> How different is it from the other primacell based DMA drivers, and why
>> wouldn't it make sense to use/modify one of them?
>
> Hehe it looks to me like primall has no relation to PrimeCell, just
> confusingly similar names :-)

yes. only with the word "prima", which is the mach name. it has
nothing to do with primecell.
>
> Yours,
> Linus Walleij
-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 18:09       ` Koul, Vinod
  (?)
@ 2011-09-08  2:12       ` Barry Song
  2011-09-08  3:17           ` Jassi Brar
  -1 siblings, 1 reply; 84+ messages in thread
From: Barry Song @ 2011-09-08  2:12 UTC (permalink / raw)
  To: Koul, Vinod
  Cc: jassisinghbrar, linus.walleij, Williams, Dan J, arnd,
	linux-kernel, workgroup.linux, rongjun.ying, Baohua.Song,
	linux-arm-kernel

[-- Attachment #1: Type: text/plain, Size: 2096 bytes --]

2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> On Thu, 2011-09-08 at 00:46 +0800, Barry Song wrote:
>> Hi Vinod,
>> thanks for your quick feedback.
>>
>> 2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
>> > On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>> >> From: Rongjun Ying <rongjun.ying@csr.com>
>> >
>> >> +config SIRF_DMA
>> >> +     tristate "CSR SiRFprimaII DMA support"
>> >> +     depends on ARCH_PRIMA2
>> >> +     select DMA_ENGINE
>> >> +     help
>> >> +       Enable support for the CSR SiRFprimaII DMA engine.
>> > How different is it from the other primacell based DMA drivers, and why
>> > wouldn't it make sense to use/modify one of them?
>>
>> it is much different with primacell based DMA like pl080, pl330.
>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> two scales X and Y and direct way to start and stop DMA.
>> every channel has fixed function to serve only one perpheral. so you
>> find we have a filter id.
> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> W and Jassi Brar posted RFC's on?

In SiRFprimaII 2-D DMA, the system memory space is interpreted
as a 2-D layout instead of a linear 1-D layout. More specifically, the
system memory can be considered as
multiple data lines. The length of the data line is determined by the
user-selected DMA_WIDTH register.
The user can specify a data window that the user wants to access using
four parameters:
■ Start address
■ X length
■ Y length
■ Width

The idea of a 2-D DMA is shown in figure 2d-dma.png attached.

If you specify the Y length as 0, or the X length equals the DMA width,
then this 2-D DMA reduces to 1-D. If the user configures the X length to
be greater than the DMA width, the extra data is wrapped around to the
next data line; this may corrupt the DMA transfer for a multiple-line
2-D DMA. If this is a 1-D DMA, there is no issue. The attached diagram
2d-dma2.png shows the wrap-around of the extra data when the X length is
greater than the DMA width.
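
In pseudo-C, the window selected by the four parameters touches the units
enumerated below. This is only a sketch of the description above: the unit of
xlen, ylen and width is left abstract (the driver insists on a 4-byte bus
width, so 32-bit words are a reasonable guess, but that is an assumption), and
ylen + 1 lines are used so that a Y length of 0 degenerates to a single line,
i.e. plain 1-D:

	/* enumerate the units covered by one 2-D window */
	void window_touch(unsigned long start, unsigned int xlen,
			  unsigned int ylen, unsigned int width,
			  void (*touch)(unsigned long unit))
	{
		unsigned int line, off;

		for (line = 0; line <= ylen; line++)		/* ylen + 1 data lines */
			for (off = 0; off < xlen; off++)	/* xlen units per line */
				touch(start + line * width + off);
		/*
		 * if xlen > width, the tail of a line runs into the start of
		 * the next one -- the wrap-around/corruption case above
		 */
	}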

Thanks
barry

[-- Attachment #2: 2d-dma.PNG --]
[-- Type: image/png, Size: 93471 bytes --]

[-- Attachment #3: 2d-dma2.PNG --]
[-- Type: image/png, Size: 9836 bytes --]

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 18:09       ` Koul, Vinod
@ 2011-09-08  2:18         ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  2:18 UTC (permalink / raw)
  To: Koul, Vinod
  Cc: jassisinghbrar, linus.walleij, Williams, Dan J, arnd,
	linux-kernel, linux-arm-kernel, DL-SHA-WorkGroupLinux

[-- Attachment #1: Type: text/plain, Size: 2096 bytes --]

2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> On Thu, 2011-09-08 at 00:46 +0800, Barry Song wrote:
>> Hi Vinod,
>> thanks for your quick feedback.
>>
>> 2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
>> > On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>> >> From: Rongjun Ying <rongjun.ying@csr.com>
>> >
>> >> +config SIRF_DMA
>> >> +     tristate "CSR SiRFprimaII DMA support"
>> >> +     depends on ARCH_PRIMA2
>> >> +     select DMA_ENGINE
>> >> +     help
>> >> +       Enable support for the CSR SiRFprimaII DMA engine.
>> > How different is it from the other primacell based DMA drivers, and why
>> > wouldn't it make sense to use/modify one of them?
>>
>> it is much different with primacell based DMA like pl080, pl330.
>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> two scales X and Y and direct way to start and stop DMA.
>> every channel has fixed function to serve only one perpheral. so you
>> find we have a filter id.
> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> W and Jassi Brar posted RFC's on?

In SiRFprimaII 2-D DMA, the system memory space is interpreted
as a 2-D layout instead of a linear 1-D layout. More specifically, the
system memory can be considered as
multiple data lines. The length of the data line is determined by the
user-selected DMA_WIDTH register.
The user can specify a data window that the user wants to access using
four parameters:
■ Start address
■ X length
■ Y length
■ Width

The idea of a 2-D DMA is shown in figure 2d-dma.png attached.

If you specify the Y length as 0, or the X length equals the DMA width,
then this 2-D DMA reduces to 1-D. If the user configures the X length to
be greater than the DMA width, the extra data is wrapped around to the
next data line; this may corrupt the DMA transfer for a multiple-line
2-D DMA. If this is a 1-D DMA, there is no issue. The attached diagram
2d-dma2.png shows the wrap-around of the extra data when the X length is
greater than the DMA width.

Thanks
barry

[-- Attachment #2: 2d-dma.PNG --]
[-- Type: image/png, Size: 16675 bytes --]

[-- Attachment #3: 2d-dma2.PNG --]
[-- Type: image/png, Size: 9836 bytes --]

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
@ 2011-09-08  2:18         ` Barry Song
  0 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  2:18 UTC (permalink / raw)
  To: linux-arm-kernel

2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
> On Thu, 2011-09-08 at 00:46 +0800, Barry Song wrote:
>> Hi Vinod,
>> thanks for your quick feedback.
>>
>> 2011/9/8 Koul, Vinod <vinod.koul@intel.com>:
>> > On Tue, 2011-09-06 at 22:41 -0700, Barry Song wrote:
>> >> From: Rongjun Ying <rongjun.ying@csr.com>
>> >
>> >> +config SIRF_DMA
>> >> +     tristate "CSR SiRFprimaII DMA support"
>> >> +     depends on ARCH_PRIMA2
>> >> +     select DMA_ENGINE
>> >> +     help
>> >> +       Enable support for the CSR SiRFprimaII DMA engine.
>> > How different is it from the other primacell based DMA drivers, and why
>> > wouldn't it make sense to use/modify one of them?
>>
>> it is much different with primacell based DMA like pl080, pl330.
>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> two scales X and Y and direct way to start and stop DMA.
>> every channel has fixed function to serve only one perpheral. so you
>> find we have a filter id.
> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> W and Jassi Brar posted RFC's on?

In SiRFprimaII 2-D DMA, the system memory space is interpreted
as a 2-D layout instead of a linear 1-D layout. More specifically, the
system memory can be considered as
multiple data lines. The length of the data line is determined by the
user-selected DMA_WIDTH register.
The user can specify a data window that the user wants to access using
four parameters:
■ Start address
■ X length
■ Y length
■ Width

The idea of a 2-D DMA is shown in figure 2d-dma.png attached.

If you specify the Y length as 0, or the X length equals the DMA width,
then this 2-D DMA reduces to 1-D. If the user configures the X length to
be greater than the DMA width, the extra data is wrapped around to the
next data line; this may corrupt the DMA transfer for a multiple-line
2-D DMA. If this is a 1-D DMA, there is no issue. The attached diagram
2d-dma2.png shows the wrap-around of the extra data when the X length is
greater than the DMA width.

Thanks
barry
-------------- next part --------------
A non-text attachment was scrubbed...
Name: 2d-dma.PNG
Type: image/png
Size: 16675 bytes
Desc: not available
URL: <http://lists.infradead.org/pipermail/linux-arm-kernel/attachments/20110908/de9ea7b0/attachment-0002.png>
-------------- next part --------------
A non-text attachment was scrubbed...
Name: 2d-dma2.PNG
Type: image/png
Size: 9836 bytes
Desc: not available
URL: <http://lists.infradead.org/pipermail/linux-arm-kernel/attachments/20110908/de9ea7b0/attachment-0003.png>

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  2:12       ` Barry Song
@ 2011-09-08  3:17           ` Jassi Brar
  0 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-08  3:17 UTC (permalink / raw)
  To: Barry Song
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:

>>> it is much different with primacell based DMA like pl080, pl330.
>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>> two scales X and Y and direct way to start and stop DMA.
>>> every channel has fixed function to serve only one perpheral. so you
>>> find we have a filter id.
>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>> W and Jassi Brar posted RFC's on?
>
> In SiRFprimaII 2-D DMA, the system memory space is interpreted
> as a 2-D layout instead of a linear 1-D layout. More specifically, the
> system memory can be considered as
> multiple data lines. The length of the data line is determined by the
> user-selected DMA_WIDTH register.
> The user can specify a data window that the user wants to access using
> four parameters:
> ■ Start address
> ■ X length
> ■ Y length
> ■ Width
>
> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>
> If you specifies the Y length as 0 or the X length equals to the DMA
> width, then this 2-D DMA reduces to
> 1-D. If the user configures the X length greater than the DMA width,
> then the extra data is wrapped around
> to the next data line, this may corrupt the DMA transfer for
> multiple-line 2-D DMA. If this is a 1-D DMA, then
> there is no issue. The attached diagram 2d-dma2.png shows the
> wrap-around of the extra data in case the X length
> greater than DMA width.

Sorry, the role of DMA_WIDTH is not clear to me yet.
In which case the user _must_ set {xlen > width} ?

thanks
-jassi

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  3:17           ` Jassi Brar
@ 2011-09-08  5:25             ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-08  5:25 UTC (permalink / raw)
  To: Barry Song
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>
>>>> it is much different with primacell based DMA like pl080, pl330.
>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>>> two scales X and Y and direct way to start and stop DMA.
>>>> every channel has fixed function to serve only one perpheral. so you
>>>> find we have a filter id.
>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>>> W and Jassi Brar posted RFC's on?
>>
>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>> system memory can be considered as
>> multiple data lines. The length of the data line is determined by the
>> user-selected DMA_WIDTH register.
>> The user can specify a data window that the user wants to access using
>> four parameters:
>> ■ Start address
>> ■ X length
>> ■ Y length
>> ■ Width
>>
>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>>
>> If you specifies the Y length as 0 or the X length equals to the DMA
>> width, then this 2-D DMA reduces to
>> 1-D. If the user configures the X length greater than the DMA width,
>> then the extra data is wrapped around
>> to the next data line, this may corrupt the DMA transfer for
>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>> there is no issue. The attached diagram 2d-dma2.png shows the
>> wrap-around of the extra data in case the X length
>> greater than DMA width.
>
> Sorry, the role of DMA_WIDTH is not clear to me yet.
> In which case the user _must_ set {xlen > width} ?
>
Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
Doesn't xlen and width always start together ? If no, please don't read ahead.

According to figures, {xlen > width} is to be set _only_ when a transfer
is divided into _exactly_ two chunks separated by gap _exactly_
equal to length of the second chunk (an extremely rare case).

Anyways, every case can be easily expressed using the generic api
I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128

Roughly speaking, the following should be done...
Client driver :-
**************
For a 'Rectangular' transfer (2d-dma.PNG) :-
      xfer_template.numf = Ylen;  /* height of rectangle */
      xfer_template.frame_size = 1;
      xfer_template.sgl[0].size = Xlen; /* width of rectangle */
      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);

For the "A Line and some" transfer (2d-dma2.PNG) :-
      xfer_template.numf = 1;
      xfer_template.frame_size = 2;
      xfer_template.sgl[0].size = xlen1; /* a line */
      xfer_template.sgl[1].size = xlen2;  /* and some */
      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
      xfer_template.sgl[1].icg = 0; /* doesn't matter */

DMAC driver :-
***************
      if (xfer_template.frame_size == 1) {
           /* rectangle */
           schan->xlen = xfer_template.sgl[0].size;
           schan->width = schan->xlen + xfer_template.sgl[0].icg;
      } else if (xfer_template.frame_size == 2 &&
                  xfer_template.numf == 1 &&
                  xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
           /* a line and some */
           schan->xlen = xfer_template.sgl[0].size + xfer_template.sgl[1].size;
           schan->width = xfer_template.sgl[0].size;
      } else {
           /* _Hardware_ doesn't support the transfer as such. */
           return -EINVAL;
      }
      schan->ylen = xfer_template.numf /* -1? */;
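
As a rough, hypothetical instance of the rectangle case, using illustrative
word-based numbers (xlen = 4, width = 8, so icg = 4 D-words; sizes kept in
D-words here for simplicity, though in practice they would likely be bytes):

      /* client side: lines of 4 D-words each, stride 8 D-words */
      xfer_template.numf = 5;          /* ylen (modulo the -1 question) */
      xfer_template.frame_size = 1;
      xfer_template.sgl[0].size = 4;   /* xlen */
      xfer_template.sgl[0].icg = 4;    /* width - xlen */

      /* the DMAC-driver mapping above then recovers
       * xlen = 4 and width = 4 + 4 = 8 */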

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  3:17           ` Jassi Brar
@ 2011-09-08  6:14             ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  6:14 UTC (permalink / raw)
  To: Jassi Brar
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

Hi Jassi,

2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>
>>>> it is much different with primacell based DMA like pl080, pl330.
>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>>> two scales X and Y and direct way to start and stop DMA.
>>>> every channel has fixed function to serve only one perpheral. so you
>>>> find we have a filter id.
>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>>> W and Jassi Brar posted RFC's on?
>>
>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>> system memory can be considered as
>> multiple data lines. The length of the data line is determined by the
>> user-selected DMA_WIDTH register.
>> The user can specify a data window that the user wants to access using
>> four parameters:
>> ■ Start address
>> ■ X length
>> ■ Y length
>> ■ Width
>>
>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>>
>> If you specifies the Y length as 0 or the X length equals to the DMA
>> width, then this 2-D DMA reduces to
>> 1-D. If the user configures the X length greater than the DMA width,
>> then the extra data is wrapped around
>> to the next data line, this may corrupt the DMA transfer for
>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>> there is no issue. The attached diagram 2d-dma2.png shows the
>> wrap-around of the extra data in case the X length
>> greater than DMA width.
>
> Sorry, the role of DMA_WIDTH is not clear to me yet.
> In which case the user _must_ set {xlen > width} ?

DMA_WIDTH is the amount the address advances by after every DMA line.
For example:
if xlen = 4, dma_width = 8, ylen = 5;
then the DMA will run as follows (suppose the DMA address starts from 0):
0~3
8~11
16~19
24~27
...

This feature can help us do DMA operations on non-contiguous memory
areas, for example, picking out part of an image.

The DMA X-length is in 32-bit D-word units. This value specifies the
number of D-words transferred in each line. The value should be less
than or equal to the value of the DMA width register.

The DMA Y-length specifies the number of lines in the DMA transfer.
The number of lines transferred is Y-length + 1.

Each channel has a dedicated DMA Width register; a DMA channel should
not use another channel's DMA width.
To enable a correct 2-D DMA, the DMA Width register must be set
correctly. The value of the DMA Width register must be greater than or
equal to the X-length, otherwise the data is overlapped. DMA_WIDTH
should not be set to 0.
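
For reference, a minimal user-space sketch (purely illustrative, not
taken from the driver) that prints the D-word index range covered by
each line of the example above:

      #include <stdio.h>

      int main(void)
      {
              /* xlen = 4, width = 8, ylen = 5 as in the example above;
               * ylen + 1 lines are transferred */
              unsigned int xlen = 4, width = 8, ylen = 5;
              unsigned int line;

              for (line = 0; line <= ylen; line++)
                      printf("%u~%u\n", line * width,
                             line * width + xlen - 1);
              return 0;
      }

This prints the 0~3, 8~11, 16~19, ... pattern listed above.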


Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  5:25             ` Jassi Brar
@ 2011-09-08  6:36               ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08  6:36 UTC (permalink / raw)
  To: Jassi Brar
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
> On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>>
>>>>> it is much different with primacell based DMA like pl080, pl330.
>>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>>>> two scales X and Y and direct way to start and stop DMA.
>>>>> every channel has fixed function to serve only one perpheral. so you
>>>>> find we have a filter id.
>>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>>>> W and Jassi Brar posted RFC's on?
>>>
>>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>>> system memory can be considered as
>>> multiple data lines. The length of the data line is determined by the
>>> user-selected DMA_WIDTH register.
>>> The user can specify a data window that the user wants to access using
>>> four parameters:
>>> ■ Start address
>>> ■ X length
>>> ■ Y length
>>> ■ Width
>>>
>>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>>>
>>> If you specifies the Y length as 0 or the X length equals to the DMA
>>> width, then this 2-D DMA reduces to
>>> 1-D. If the user configures the X length greater than the DMA width,
>>> then the extra data is wrapped around
>>> to the next data line, this may corrupt the DMA transfer for
>>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>>> there is no issue. The attached diagram 2d-dma2.png shows the
>>> wrap-around of the extra data in case the X length
>>> greater than DMA width.
>>
>> Sorry, the role of DMA_WIDTH is not clear to me yet.
>> In which case the user _must_ set {xlen > width} ?
>>
> Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
> Doesn't xlen and width always start together ? If no, please don't read ahead.
>
> According to figures, {xlen > width} is to be set _only_ when a transfer
> is divided into _exactly_ two chunks separated by gap _exactly_
> equal to length of the second chunk (an extremely rare case).

Sorry, I didn't list the full related information from the datasheet in my earlier reply.
We don't have the case of xlen > dma_width.

>
> Anyways, every case can be easily expressed using the generic api
> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128

I think this is the interleaved DMA case: there is an
inter-chunk gap (ICG) between every chunk when dma_width > xlen,
so your new generic API can definitely cover this case.

>
> Roughly speaking, the following should be done...
> Client driver :-
> **************
> For a 'Rectangular' transfer (2d-dma.PNG) :-
>      xfer_template.numf = Ylen;  /* height of rectangle */
>      xfer_template.frame_size = 1;
>      xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>
> For the "A Line and some" transfer (2d-dma2.PNG) :-
>      xfer_template.numf = 1;
>      xfer_template.frame_size = 2;
>      xfer_template.sgl[0].size = xlen1; /* a line */
>      xfer_template.sgl[1].size = xlen2;  /* and some */
>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>      xfer_template.sgl[1].icg = 0; /* doesn't matter */
>
> DMAC driver :-
> ***************
>      if (xfer_template.frame_size == 1) {
>           /* rectangle */
>           schan->xlen = xfer_template.sgl[0].size;
>           schan->width = schan->xlen + xfer_template.sgl[0].icg;
>      } else if (xfer_template.frame_size == 2 &&
>                  xfer_template.numf == 1 &&
>                  xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>           /* a line and some */
>           schan->xlen = xfer_template.sgl[0].size + xfer_template.sgl[1].size;
>           schan->width = xfer_template.sgl[0].size;
>      } else {
>           /* _Hardware_ doesn't support the transfer as such. */
>           return -EINVAL;
>      }
>      schan->ylen = xfer_template.numf /* -1? */;
>

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  6:14             ` Barry Song
@ 2011-09-08  6:37               ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-08  6:37 UTC (permalink / raw)
  To: Barry Song
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

On Thu, Sep 8, 2011 at 11:44 AM, Barry Song <21cnbao@gmail.com> wrote:
>> Sorry, the role of DMA_WIDTH is not clear to me yet.
>> In which case the user _must_ set {xlen > width} ?
>
> DMA_WIDTH is the amount the address advances by after every DMA line.
> For example:
> if xlen = 4, dma_width = 8, ylen = 5;
> then the DMA will run as follows (suppose the DMA address starts from 0):
> 0~3
> 8~11
> 16~19
> 24~27
> ...
Ok, so I guessed right.  2d-dma.PNG is indeed inaccurate, unless
there is some separate start-address of the 'width-window' which
you haven't mentioned.

> This feature can help us do DMA operations on non-contiguous memory
> areas, for example, picking out part of an image.
That's just a small part of what the proposed api can do.

> The DMA X-length is in 32-bit D-word units. This value specifies the
> number of D-words transferred in each line. The value should be less
> than or equal to the value of the DMA width register.
>
> The DMA Y-length specifies the number of lines in the DMA transfer.
> The number of lines transferred is Y-length + 1.
>
> Each channel has a dedicated DMA Width register; a DMA channel should
> not use another channel's DMA width.
> To enable a correct 2-D DMA, the DMA Width register must be set
> correctly. The value of the DMA Width register must be greater than or
> equal to the X-length, otherwise the data is overlapped. DMA_WIDTH
> should not be set to 0.
>
Ok, so what I said in my last post [https://lkml.org/lkml/2011/9/8/10] seems
quite accurate. Maybe you only need to additionally take care of
conversion of sizes between bytes and words.

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  6:36               ` Barry Song
@ 2011-09-08  7:49                 ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-08  7:49 UTC (permalink / raw)
  To: Barry Song
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

On Thu, Sep 8, 2011 at 12:06 PM, Barry Song <21cnbao@gmail.com> wrote:
> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
>> According to figures, {xlen > width} is to be set _only_ when a transfer
>> is divided into _exactly_ two chunks separated by gap _exactly_
>> equal to length of the second chunk (an extremely rare case).
>
> Sorry, I didn't list the full related information from the datasheet in my earlier reply.
> We don't have the case of xlen > dma_width.
Then the dmac driver becomes even simpler:
accept strictly 'rectangular' requests and reject all others.
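
Based on the mapping sketched earlier in the thread, that check might
look roughly like this (xt and schan are only assumed names for the
xfer_template and the SiRF channel state, not the final API):

      /* only the strictly rectangular case maps onto the hardware:
       * numf lines of sgl[0].size each, separated by sgl[0].icg */
      if (xt->frame_size != 1 || xt->numf == 0)
              return NULL;                    /* reject everything else */

      schan->xlen  = xt->sgl[0].size;
      schan->width = xt->sgl[0].size + xt->sgl[0].icg;
      schan->ylen  = xt->numf - 1;            /* hw transfers ylen + 1 lines */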


>> Anyways, every case can be easily expressed using the generic api
>> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
>
> I think this is the interleaved DMA case: there is an
> inter-chunk gap (ICG) between every chunk when dma_width > xlen,
> so your new generic API can definitely cover this case.
>
Yup, it does. Thanks for confirming.

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07  5:41 ` Barry Song
@ 2011-09-08 14:52   ` Arnd Bergmann
  -1 siblings, 0 replies; 84+ messages in thread
From: Arnd Bergmann @ 2011-09-08 14:52 UTC (permalink / raw)
  To: Barry Song
  Cc: dan.j.williams, vinod.koul, workgroup.linux, linux-arm-kernel,
	linux-kernel, Rongjun Ying

On Wednesday 07 September 2011, Barry Song wrote:
> +/*
> + * The DMA controller consists of 16 independent DMA channels.
> + * Each channel is allocated to a different function
> + */
> +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
> +{
> +       unsigned int ch_nr = (unsigned int) chan_id;
> +
> +       if (ch_nr == chan->chan_id)
> +               return true;
> +
> +       return false;
> +}
> +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
> +

Hi Barry,

It seems wrong that you have to export this function. The dmaengine API
should be able to work without this, and when you have drivers using the
interface, those should not rely on a specific implementation as the
purpose of the API is specifically to hide that.

Am I missing something?

	Arnd

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 14:52   ` Arnd Bergmann
@ 2011-09-08 15:27     ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-08 15:27 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Barry Song, vinod.koul, linux-kernel, workgroup.linux,
	Rongjun Ying, dan.j.williams, linux-arm-kernel

Hi Arnd,
Thanks!

2011/9/8 Arnd Bergmann <arnd@arndb.de>
>
> On Wednesday 07 September 2011, Barry Song wrote:
> > +/*
> > + * The DMA controller consists of 16 independent DMA channels.
> > + * Each channel is allocated to a different function
> > + */
> > +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
> > +{
> > +       unsigned int ch_nr = (unsigned int) chan_id;
> > +
> > +       if (ch_nr == chan->chan_id)
> > +               return true;
> > +
> > +       return false;
> > +}
> > +EXPORT_SYMBOL(sirfsoc_dma_filter_id);
> > +
>
> Hi Barry,
>
> It seems wrong that you have to export this function. The dmaengine API
> should be able to work without this, and when you have drivers using the
> interface, those should not rely on a specific implementation as the
> purpose of the API is specifically to hide that.
>
> Am I missing something?

I am not sure whether I have another way to request a specific channel
for a specific device. It seems dma_request_channel only gives me a
chance to use a filter function, since all my channels have the same
DMA cap masks.

this filter is used by all drivers with DMA since every dma channel is
fixed to be assigned to one device.

I copied this from coh901318.c:
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
    unsigned int ch_nr = (unsigned int) chan_id;

    if (ch_nr == to_coh901318_chan(chan)->id)
        return true;

    return false;
}
EXPORT_SYMBOL(coh901318_filter_id);

If it does become a common filter, we might have a function like:

bool dmaengine_filter_match_channel_id(struct dma_chan *chan, void *chan_id)
{
    unsigned int ch_nr = (unsigned int) chan_id;

    if (ch_nr == chan->chan_id)
        return true;

    return false;
}
EXPORT_SYMBOL(dmaengine_filter_match_channel_id);

Another filter is in amba-pl08x.c; it filters channels by name:
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
    struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
    char *name = chan_id;

    /* Check that the channel is not taken! */
    if (!strcmp(plchan->name, name))
        return true;

    return false;
}
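
For reference, a client on prima2 would then request its fixed channel
roughly like this (channel number 4 is only an example, and the sketch
assumes the header added by this patch declares the filter):

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *example_request_chan(void)
{
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* each prima2 channel is hard-wired to one peripheral;
     * channel 4 is just an example */
    return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)4);
}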

>
>        Arnd
>

-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 15:27     ` Barry Song
@ 2011-09-08 16:19       ` Arnd Bergmann
  -1 siblings, 0 replies; 84+ messages in thread
From: Arnd Bergmann @ 2011-09-08 16:19 UTC (permalink / raw)
  To: Barry Song
  Cc: Barry Song, vinod.koul, linux-kernel, workgroup.linux,
	Rongjun Ying, dan.j.williams, linux-arm-kernel

On Thursday 08 September 2011, Barry Song wrote:
> I am not sure whether I have another way to request a specific channel
> for a specific device. It seems dma_request_channel only gives me a
> chance to use a filter function, since all my channels have the same
> DMA cap masks.
> 
> this filter is used by all drivers with DMA since every dma channel is
> fixed to be assigned to one device.
> 
> I copied this from coh901318.c:
> bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
> {
>     unsigned int ch_nr = (unsigned int) chan_id;
> 
>     if (ch_nr == to_coh901318_chan(chan)->id)
>         return true;
> 
>     return false;
> }
> EXPORT_SYMBOL(coh901318_filter_id);
> 
> If it does become a common filter, we might have a function like:
> 
> bool dmaengine_filter_match_channel_id(struct dma_chan *chan, void *chan_id)
> {
>     unsigned int ch_nr = (unsigned int) chan_id;
> 
>     if (ch_nr == chan->chan_id)
>         return true;
> 
>     return false;
> }
> EXPORT_SYMBOL(dmaengine_filter_match_channel_id);

Ok, I see now. I think it would be best to introduce a generic
'filter by device tree property' function or alternatively an
dma_of_request_channel function like this:

struct dma_chan *dma_of_request_channel(struct device *dev, unsigned int index)
{
	struct dma_device *dmadev;
	const struct {
		unsigned int phandle;
		unsigned int channel_num;
	} *property;
	int lenp;

	property = of_get_property(dev->of_node, "dma-channel", &lenp);
	if (!property || lenp < (index + 1) * sizeof(*property))
		return ERR_PTR(-EINVAL);

	property += index;

	dmadev = dma_find_device(of_find_node_by_phandle(property->phandle));
	if (!dmadev)
		return ERR_PTR(-ENODEV);

	return dma_get_channel(dmadev, property->channel_num);
}

This way, you can link a device to its dma_channel in the device tree without
the device driver even understanding what a dma_device or a channel id
is.
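
To make that concrete, a hypothetical client usage could look like the
following (dma_of_request_channel, dma_find_device and dma_get_channel
are only the proposal above, not an existing API, and the property
layout is an assumption):

	/* device node, e.g.:  dma-channel = <&dmac 4>, <&dmac 5>; */

	static int example_probe(struct platform_device *pdev)
	{
		struct dma_chan *rx_chan;

		rx_chan = dma_of_request_channel(&pdev->dev, 0);
		if (IS_ERR(rx_chan))
			return PTR_ERR(rx_chan);

		/* set up slave config and prep descriptors as usual */
		return 0;
	}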

	Arnd

^ permalink raw reply	[flat|nested] 84+ messages in thread

* [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 16:19       ` Arnd Bergmann
  (?)
@ 2011-09-08 18:48       ` Linus Walleij
  2011-09-08 20:11           ` Arnd Bergmann
  -1 siblings, 1 reply; 84+ messages in thread
From: Linus Walleij @ 2011-09-08 18:48 UTC (permalink / raw)
  To: linux-arm-kernel

2011/9/8 Arnd Bergmann <arnd@arndb.de>:
> On Thursday 08 September 2011, Barry Song wrote:
>>
>> this filter is used by all drivers with DMA since every dma channel is
>> fixed to be assigned to one device.
>
> Ok, I see now. I think it would be best to introduce a generic
> 'filter by device tree property' function or alternatively an
> dma_of_request_channel function like this:

You'd have to discuss that with Vinod, the thing is that x86 Atom
systems are using dmaengine for device slave transfers too, and
IIRC these things don't use devicetrees. I may be wrong...

Yours,
Linus Walleij

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 18:48       ` Linus Walleij
@ 2011-09-08 20:11           ` Arnd Bergmann
  0 siblings, 0 replies; 84+ messages in thread
From: Arnd Bergmann @ 2011-09-08 20:11 UTC (permalink / raw)
  To: Linus Walleij
  Cc: Barry Song, vinod.koul, linux-kernel, workgroup.linux,
	Rongjun Ying, Barry Song, dan.j.williams, linux-arm-kernel

On Thursday 08 September 2011 20:48:26 Linus Walleij wrote:
> 2011/9/8 Arnd Bergmann <arnd@arndb.de>:
> > On Thursday 08 September 2011, Barry Song wrote:
> >>
> >> this filter is used by all drivers with DMA since every dma channel is
> >> fixed to be assigned to one device.
> >
> > Ok, I see now. I think it would be best to introduce a generic
> > 'filter by device tree property' function or alternatively an
> > dma_of_request_channel function like this:
> 
> You'd have to discuss that with Vinod, the thing is that x86 Atom
> systems are using dmaengine for device slave transfers too, and
> IIRC these things don't use devicetrees. I may be wrong...

Some of them use device tree, some don't.

I'm not saying that we have to convert all drivers to use this, but
for platforms that always have device tree available, it seems by far
the cleanest solution.

	Arnd

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 20:11           ` Arnd Bergmann
@ 2011-09-08 21:38             ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-08 21:38 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: vinod.koul, Linus Walleij, Barry Song, linux-kernel,
	workgroup.linux, Rongjun Ying, Barry Song, dan.j.williams,
	linux-arm-kernel

On Thu, 2011-09-08 at 22:11 +0200, Arnd Bergmann wrote:
> On Thursday 08 September 2011 20:48:26 Linus Walleij wrote:
> > 2011/9/8 Arnd Bergmann <arnd@arndb.de>:
> > > On Thursday 08 September 2011, Barry Song wrote:
> > >>
> > >> this filter is used by all drivers with DMA since every dma channel is
> > >> fixed to be assigned to one device.
> > >
> > > Ok, I see now. I think it would be best to introduce a generic
> > > 'filter by device tree property' function or alternatively an
> > > dma_of_request_channel function like this:
> > 
> > You'd have to discuss that with Vinod, the thing is that x86 Atom
> > systems are using dmaengine for device slave transfers too, and
> > IIRC these things don't use devicetrees. I may be wrong...
> 
> Some of them use device tree, some don't.
> 
> I'm not saying that we have to convert all drivers to use this, but
> for platforms that always have device tree available, it seems by far
> the cleanest solution.
We don't have a very clean solution for the filter function in the case
of slave dmaengine. How the client should specify which channel it
wants is not really clear.

We can look at the device tree, but that won't work on non-device-tree
platforms (Atom x86).
What we need is the channel-mapping information, which IMO is platform
specific and needs to come from platform data. We can abstract it from
the device tree data/PCI/firmware etc., but essentially we need a
mechanism to publish channels and slaves uniquely and match them in
this kind of data.

Linus W, any progress on those patches you posted?
 
-- 
~Vinod Koul
Intel Corp.




^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  5:25             ` Jassi Brar
@ 2011-09-08 21:46               ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-08 21:46 UTC (permalink / raw)
  To: Jassi Brar
  Cc: vinod.koul, Barry Song, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Thu, 2011-09-08 at 10:55 +0530, Jassi Brar wrote:
> On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> > On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
> >
> >>>> it is much different with primacell based DMA like pl080, pl330.
> >>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
> >>>> two scales X and Y and direct way to start and stop DMA.
> >>>> every channel has fixed function to serve only one perpheral. so you
> >>>> find we have a filter id.
> >>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> >>> W and Jassi Brar posted RFC's on?
> >>
> >> In SiRFprimaII 2-D DMA, the system memory space is interpreted
> >> as a 2-D layout instead of a linear 1-D layout. More specifically, the
> >> system memory can be considered as
> >> multiple data lines. The length of the data line is determined by the
> >> user-selected DMA_WIDTH register.
> >> The user can specify a data window that the user wants to access using
> >> four parameters:
> >> ■ Start address
> >> ■ X length
> >> ■ Y length
> >> ■ Width
> >>
> >> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
> >>
> >> If you specifies the Y length as 0 or the X length equals to the DMA
> >> width, then this 2-D DMA reduces to
> >> 1-D. If the user configures the X length greater than the DMA width,
> >> then the extra data is wrapped around
> >> to the next data line, this may corrupt the DMA transfer for
> >> multiple-line 2-D DMA. If this is a 1-D DMA, then
> >> there is no issue. The attached diagram 2d-dma2.png shows the
> >> wrap-around of the extra data in case the X length
> >> greater than DMA width.
> >
> > Sorry, the role of DMA_WIDTH is not clear to me yet.
> > In which case the user _must_ set {xlen > width} ?
> >
> Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
> Doesn't xlen and width always start together ? If no, please don't read ahead.
> 
> According to figures, {xlen > width} is to be set _only_ when a transfer
> is divided into _exactly_ two chunks separated by gap _exactly_
> equal to length of the second chunk (an extremely rare case).
> 
> Anyways, every case can be easily expressed using the generic api
> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
> 
> Roughly speaking, the following should be done...
> Client driver :-
> **************
> For a 'Rectangular' transfer (2d-dma.PNG) :-
>       xfer_template.numf = Ylen;  /* height of rectangle */
>       xfer_template.frame_size = 1;
>       xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>       xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
> 
> For the "A Line and some" transfer (2d-dma2.PNG) :-
>       xfer_template.numf = 1;
>       xfer_template.frame_size = 2;
>       xfer_template.sgl[0].size = xlen1; /* a line */
>       xfer_template.sgl[1].size = xlen2;  /* and some */
>       xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>       xfer_template.sgl[1].icg = 0; /* doesn't matter */
> 
> DMAC driver :-
> ***************
>       if (xfer_template.frame_size == 1) {
>            /* rectangle */
>            schan->xlen = xfer_template.sgl[0].size;
>            schan->width = schan->xlen + xfer_template.sgl[0].icg;
>       } else if (xfer_template.frame_size == 2 &&
>                   xfer_template.numf == 1 &&
>                   xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>            /* a line and some */
>            schan->xlen = xfer_template.sgl[0].size + xfer_template.sgl[1].size;
>            schan->width = xfer_template.sgl[0].size;
>       } else {
>            /* _Hardware_ doesn't support the transfer as such. */
>            return -EINVAL;
>       }
>       schan->ylen = xfer_template.numf /* -1? */;
Looks like Jassi got a user for his proposed API. I am not sure what's
going on with the TI folks; they never showed up here @LPC.


-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  6:36               ` Barry Song
@ 2011-09-08 21:51                 ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-08 21:51 UTC (permalink / raw)
  To: Barry Song
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Thu, 2011-09-08 at 14:36 +0800, Barry Song wrote:
> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
> > On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> >> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
> >>
> >>>>> it is much different with primacell based DMA like pl080, pl330.
> >>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
> >>>>> two scales X and Y and direct way to start and stop DMA.
> >>>>> every channel has fixed function to serve only one perpheral. so you
> >>>>> find we have a filter id.
> >>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> >>>> W and Jassi Brar posted RFC's on?
> >>>
> >>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
> >>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
> >>> system memory can be considered as
> >>> multiple data lines. The length of the data line is determined by the
> >>> user-selected DMA_WIDTH register.
> >>> The user can specify a data window that the user wants to access using
> >>> four parameters:
> >>> ■ Start address
> >>> ■ X length
> >>> ■ Y length
> >>> ■ Width
> >>>
> >>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
> >>>
> >>> If you specifies the Y length as 0 or the X length equals to the DMA
> >>> width, then this 2-D DMA reduces to
> >>> 1-D. If the user configures the X length greater than the DMA width,
> >>> then the extra data is wrapped around
> >>> to the next data line, this may corrupt the DMA transfer for
> >>> multiple-line 2-D DMA. If this is a 1-D DMA, then
> >>> there is no issue. The attached diagram 2d-dma2.png shows the
> >>> wrap-around of the extra data in case the X length
> >>> greater than DMA width.
> >>
> >> Sorry, the role of DMA_WIDTH is not clear to me yet.
> >> In which case the user _must_ set {xlen > width} ?
> >>
> > Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
> > Doesn't xlen and width always start together ? If no, please don't read ahead.
> >
> > According to figures, {xlen > width} is to be set _only_ when a transfer
> > is divided into _exactly_ two chunks separated by gap _exactly_
> > equal to length of the second chunk (an extremely rare case).
> 
> Sorry i didn't list related full information in datasheet in my early reply.
> we don't have the case of xlen > dma_width.
What is the intended usage of the 2D DMA?

In the first diagram you sent, the X length is the length of the first
block to transfer and (dma_width - x_len) is the length to skip, right?

So what is the second diagram about? Is this just the case where
xlen > dma_width? How much do you transfer and how much do you skip?

-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 21:51                 ` Vinod Koul
@ 2011-09-09  2:35                   ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-09  2:35 UTC (permalink / raw)
  To: Vinod Koul
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

2011/9/9 Vinod Koul <vkoul@infradead.org>:
> On Thu, 2011-09-08 at 14:36 +0800, Barry Song wrote:
>> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
>> > On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>> >> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>> >>
>> >>>>> it is much different with primacell based DMA like pl080, pl330.
>> >>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> >>>>> two scales X and Y and direct way to start and stop DMA.
>> >>>>> every channel has fixed function to serve only one perpheral. so you
>> >>>>> find we have a filter id.
>> >>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>> >>>> W and Jassi Brar posted RFC's on?
>> >>>
>> >>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>> >>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>> >>> system memory can be considered as
>> >>> multiple data lines. The length of the data line is determined by the
>> >>> user-selected DMA_WIDTH register.
>> >>> The user can specify a data window that the user wants to access using
>> >>> four parameters:
>> >>> ■ Start address
>> >>> ■ X length
>> >>> ■ Y length
>> >>> ■ Width
>> >>>
>> >>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>> >>>
>> >>> If you specifies the Y length as 0 or the X length equals to the DMA
>> >>> width, then this 2-D DMA reduces to
>> >>> 1-D. If the user configures the X length greater than the DMA width,
>> >>> then the extra data is wrapped around
>> >>> to the next data line, this may corrupt the DMA transfer for
>> >>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>> >>> there is no issue. The attached diagram 2d-dma2.png shows the
>> >>> wrap-around of the extra data in case the X length
>> >>> greater than DMA width.
>> >>
>> >> Sorry, the role of DMA_WIDTH is not clear to me yet.
>> >> In which case the user _must_ set {xlen > width} ?
>> >>
>> > Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
>> > Doesn't xlen and width always start together ? If no, please don't read ahead.
>> >
>> > According to figures, {xlen > width} is to be set _only_ when a transfer
>> > is divided into _exactly_ two chunks separated by gap _exactly_
>> > equal to length of the second chunk (an extremely rare case).
>>
>> Sorry i didn't list related full information in datasheet in my early reply.
>> we don't have the case of xlen > dma_width.
> What is intended usage of the 2D dma?

Two cases:
1. continuous DMA:
xlen = DMA_WIDTH
2. interleaved DMA:
xlen < DMA_WIDTH

Case 2 is for DMA of video/image data or something like that. For
example, you might copy only 1/9 of a nine-square grid with it.

>
> In first diagram you sent, x length is first block length to transfer
> and (dma_width - x_len) is length to skip, right?

(dma_width - x_len) is the length to skip.

>
> So what is the second diagram about? Is this juts a case when notion of
> xlen > dma_width? How much you transfer and how much you skip?

Actually, there is no real case for xlen > dma_width. This picture only
explains what will happen if we set xlen > dma_width: for every line the
DMA will transfer only dma_width, and the extra data is wrapped around
to the next data line. That corrupts the transfer for multiple-line 2-D
DMA, since the extra data will overlap with the real next line.

If we set ylen to 0, the DMA becomes 1-D and the wrap-around of the
extra data is not an issue, since nothing will overlap with it.
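
A minimal illustrative model of that addressing (not driver code): it
assumes line n starts at start + n * width, that ylen + 1 lines are
transferred, and the function name is made up.

#include <stdio.h>

/* Print the range of units touched on each line for a 2-D transfer. */
static void model_2d_dma(unsigned int start, unsigned int xlen,
			 unsigned int ylen, unsigned int width)
{
	unsigned int line;

	for (line = 0; line <= ylen; line++) {
		unsigned int from = start + line * width;

		printf("line %u: %u ~ %u\n", line, from, from + xlen - 1);
	}
}

int main(void)
{
	model_2d_dma(0, 4, 2, 4);	/* continuous:  xlen == DMA_WIDTH, plain 1-D */
	model_2d_dma(0, 2, 2, 4);	/* interleaved: xlen < DMA_WIDTH, skip 2 per line */
	return 0;
}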

>
> --
> ~Vinod Koul
> Intel Corp.
>
>

-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09  2:35                   ` Barry Song
@ 2011-09-09  2:52                     ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-09  2:52 UTC (permalink / raw)
  To: Vinod Koul
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

2011/9/9 Barry Song <21cnbao@gmail.com>:
> 2011/9/9 Vinod Koul <vkoul@infradead.org>:
>> On Thu, 2011-09-08 at 14:36 +0800, Barry Song wrote:
>>> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
>>> > On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>>> >> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>>> >>
>>> >>>>> it is much different with primacell based DMA like pl080, pl330.
>>> >>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>> >>>>> two scales X and Y and direct way to start and stop DMA.
>>> >>>>> every channel has fixed function to serve only one perpheral. so you
>>> >>>>> find we have a filter id.
>>> >>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>>> >>>> W and Jassi Brar posted RFC's on?
>>> >>>
>>> >>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>>> >>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>>> >>> system memory can be considered as
>>> >>> multiple data lines. The length of the data line is determined by the
>>> >>> user-selected DMA_WIDTH register.
>>> >>> The user can specify a data window that the user wants to access using
>>> >>> four parameters:
>>> >>> ■ Start address
>>> >>> ■ X length
>>> >>> ■ Y length
>>> >>> ■ Width
>>> >>>
>>> >>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>>> >>>
>>> >>> If you specifies the Y length as 0 or the X length equals to the DMA
>>> >>> width, then this 2-D DMA reduces to
>>> >>> 1-D. If the user configures the X length greater than the DMA width,
>>> >>> then the extra data is wrapped around
>>> >>> to the next data line, this may corrupt the DMA transfer for
>>> >>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>>> >>> there is no issue. The attached diagram 2d-dma2.png shows the
>>> >>> wrap-around of the extra data in case the X length
>>> >>> greater than DMA width.
>>> >>
>>> >> Sorry, the role of DMA_WIDTH is not clear to me yet.
>>> >> In which case the user _must_ set {xlen > width} ?
>>> >>
>>> > Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
>>> > Doesn't xlen and width always start together ? If no, please don't read ahead.
>>> >
>>> > According to figures, {xlen > width} is to be set _only_ when a transfer
>>> > is divided into _exactly_ two chunks separated by gap _exactly_
>>> > equal to length of the second chunk (an extremely rare case).
>>>
>>> Sorry i didn't list related full information in datasheet in my early reply.
>>> we don't have the case of xlen > dma_width.
>> What is intended usage of the 2D dma?
>
> two cases:
> 1. continuous DMA:
> xlen = DMA_WIDTH
> 2. interleaved DMA:
> xlen < DMA_WIDTH
>
> 2 is for dma video/image or something like that.  For example, you
> might only copy 1/9 of a nine-square grid by it.
>
>>
>> In first diagram you sent, x length is first block length to transfer
>> and (dma_width - x_len) is length to skip, right?
>
> (dma_width - x_len) is length to skip
>
>>
>> So what is the second diagram about? Is this juts a case when notion of
>> xlen > dma_width? How much you transfer and how much you skip?
>
> Actually, there is no real case for xlen > dma_width. this picture is
> only explaining what will happen if we set xlen > dma_width.
> when xlen > dma_width, for every line, dma will transfer only
> dma_width. Then the extra data is wrapped around
> to the next data line, it will corrupt the DMA transfer for
> multiple-line 2-D DMA since the extra data will overlap with the real
> next line.
>
> if we set ylen to 0, the DMA becomes 1D, the wrap-around of the extra
> data has no issue nobody will overlap with it.
>

For example, if we set xlen = 7, dma_width = 4 and ylen > 0,
then the DMA will transfer:
0~6
4~10
8~14
12~18
....

But for ylen = 0 there is no issue: there is no next line to overlap
with the extra data.

In case we set xlen = 0, something funny will happen:
it will copy the same memory again and again (ylen+1 times).
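
A hedged validation sketch of the constraints described above; the
struct and field names are illustrative, not the driver's:

#include <stdbool.h>

struct sirf_2d_cfg {
	unsigned int xlen;	/* units copied per line */
	unsigned int ylen;	/* extra lines beyond the first (0 = 1-D) */
	unsigned int width;	/* line stride (DMA_WIDTH) */
};

/*
 * For multi-line 2-D DMA a line must not be longer than the stride, or
 * consecutive lines overlap (xlen = 7, width = 4 gives 0~6, 4~10,
 * 8~14, ...).  xlen = 0 is also refused, since it would just re-copy
 * the same memory ylen + 1 times.
 */
static bool sirf_2d_cfg_valid(const struct sirf_2d_cfg *cfg)
{
	if (cfg->xlen == 0)
		return false;
	if (cfg->ylen == 0)
		return true;		/* 1-D: no next line to corrupt */
	return cfg->xlen <= cfg->width;
}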

-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 21:46               ` Vinod Koul
@ 2011-09-09  8:18                 ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-09  8:18 UTC (permalink / raw)
  To: Vinod Koul
  Cc: Jassi Brar, vinod.koul, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

2011/9/9 Vinod Koul <vkoul@infradead.org>:
> On Thu, 2011-09-08 at 10:55 +0530, Jassi Brar wrote:
>> On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>> > On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>> >
>> >>>> it is much different with primacell based DMA like pl080, pl330.
>> >>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> >>>> two scales X and Y and direct way to start and stop DMA.
>> >>>> every channel has fixed function to serve only one perpheral. so you
>> >>>> find we have a filter id.
>> >>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>> >>> W and Jassi Brar posted RFC's on?
>> >>
>> >> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>> >> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>> >> system memory can be considered as
>> >> multiple data lines. The length of the data line is determined by the
>> >> user-selected DMA_WIDTH register.
>> >> The user can specify a data window that the user wants to access using
>> >> four parameters:
>> >> ■ Start address
>> >> ■ X length
>> >> ■ Y length
>> >> ■ Width
>> >>
>> >> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>> >>
>> >> If you specifies the Y length as 0 or the X length equals to the DMA
>> >> width, then this 2-D DMA reduces to
>> >> 1-D. If the user configures the X length greater than the DMA width,
>> >> then the extra data is wrapped around
>> >> to the next data line, this may corrupt the DMA transfer for
>> >> multiple-line 2-D DMA. If this is a 1-D DMA, then
>> >> there is no issue. The attached diagram 2d-dma2.png shows the
>> >> wrap-around of the extra data in case the X length
>> >> greater than DMA width.
>> >
>> > Sorry, the role of DMA_WIDTH is not clear to me yet.
>> > In which case the user _must_ set {xlen > width} ?
>> >
>> Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
>> Doesn't xlen and width always start together ? If no, please don't read ahead.
>>
>> According to figures, {xlen > width} is to be set _only_ when a transfer
>> is divided into _exactly_ two chunks separated by gap _exactly_
>> equal to length of the second chunk (an extremely rare case).
>>
>> Anyways, every case can be easily expressed using the generic api
>> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
>>
>> Roughly speaking, the following should be done...
>> Client driver :-
>> **************
>> For a 'Rectangular' transfer (2d-dma.PNG) :-
>>       xfer_template.numf = Ylen;  /* height of rectangle */
>>       xfer_template.frame_size = 1;
>>       xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>>       xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>>
>> For the "A Line and some" transfer (2d-dma2.PNG) :-
>>       xfer_template.numf = 1;
>>       xfer_template.frame_size = 2;
>>       xfer_template.sgl[0].size = xlen1; /* a line */
>>       xfer_template.sgl[1].size = xlen2;  /* and some */
>>       xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>>       xfer_template.sgl[1].icg = 0; /* doesn't matter */
>>
>> DMAC driver :-
>> ***************
>>       if (xfer_template.frame_size == 1) {
>>            /* rectangle */
>>            schan->xlen = xfer_template.sgl[0].size;
>>            schan->width = schan->xlen + xfer_template.sgl[0].icg;
>>       } else if (xfer_template.frame_size == 2 &&
>>                   xfer_template.numf == 1 &&
>>                   xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>>            /* a line and some */
>>            schan->xlen = xfer_template.sgl[0].size + xfer_template.sgl[1].size;
>>            schan->width = xfer_template.sgl[0].size;
>>       } else {
>>            /* _Hardware_ doesn't support the transfer as such. */
>>            return -EINVAL;
>>       }
>>       schan->ylen = xfer_template.numf /* -1? */;
> Looks like Jassi got a user for his proposed API. I am not sure whats
> going on with TI folks, they never showed up here @LPC.

Yes, I can definitely be a user of Jassi's new generic API; it seems
the API is there precisely for DMA in 2D.

Jassi prefers to use a transfer type instead of a control command.
Though we will not really change the interleaved settings for every
transfer (more likely, for one device we will never change the
xlen/ylen/dma_width settings during its whole lifetime), I do believe
the transfer type is flexible enough for my possible applications to
change xlen, ylen and dma_width in different transfers.

As we know, interleaved DMA can also be used by audio drivers. For
some audio controllers with n channels, the whole audio frame cannot
be transferred to the sound card by one DMA channel, and every channel
needs to be transferred by a separate DMA. Typically, we can define
this kind of xfer to skip the other n-1 channels.
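
As a hedged illustration of that audio case, filling the fields quoted
from Jassi's RFC above; the struct below is a local stand-in for the
real 'struct xfer_template' (https://lkml.org/lkml/2011/8/12/128), and
sample_bytes/n_chan/n_frames are made-up parameters:

#include <stddef.h>

struct xfer_chunk {
	size_t size;	/* bytes to transfer */
	size_t icg;	/* inter-chunk gap to skip afterwards */
};

struct xfer_template_stub {
	size_t numf;			/* number of frames */
	size_t frame_size;		/* chunks per frame */
	struct xfer_chunk sgl[1];
};

/*
 * De-interleave one channel of an n_chan-channel audio buffer: each
 * audio frame contributes one sample of our channel and skips the
 * other n_chan - 1 samples.
 */
static void fill_one_channel_xfer(struct xfer_template_stub *xt,
				  size_t sample_bytes, unsigned int n_chan,
				  size_t n_frames)
{
	xt->numf = n_frames;				/* one "line" per audio frame */
	xt->frame_size = 1;
	xt->sgl[0].size = sample_bytes;			/* our channel's sample */
	xt->sgl[0].icg = (n_chan - 1) * sample_bytes;	/* skip the rest */
}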

Jassi, you may take my reply as an ACK to "[PATCH] DMAEngine:
Define generic transfer request api".

>
>
> --
> ~Vinod Koul
> Intel Corp.
>
>

thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 20:11           ` Arnd Bergmann
@ 2011-09-09 16:10             ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-09 16:10 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: vinod.koul, Linus Walleij, Barry Song, linux-kernel,
	workgroup.linux, Rongjun Ying, Barry Song, dan.j.williams,
	linux-arm-kernel

On Thu, 2011-09-08 at 22:11 +0200, Arnd Bergmann wrote:
> On Thursday 08 September 2011 20:48:26 Linus Walleij wrote:
> > 2011/9/8 Arnd Bergmann <arnd@arndb.de>:
> > > On Thursday 08 September 2011, Barry Song wrote:
> > >>
> > >> this filter is used by all drivers with DMA since every dma channel is
> > >> fixed to be assigned to one device.
> > >
> > > Ok, I see now. I think it would be best to introduce a generic
> > > 'filter by device tree property' function or alternatively an
> > > dma_of_request_channel function like this:
> > 
> > You'd have to discuss that with Vinod, the thing is that x86 Atom
> > systems are using dmaengine for device slave transfers too, and
> > IIRC these things don't use devicetrees. I may be wrong...
> 
> Some of them use device tree, some don't.
> 
> I'm not saying that we have to convert all drivers to use this, but
> for platforms that always have device tree available, it seems by far
> the cleanest solution.
It may be a clean solution, but if it doesn't fit all needs, then I am
not sure...

One way, as I said earlier, is to get the dmac-client relationship
information in an arch-specific way (PCI, device tree, etc.) and have
the dmacs/clients/platform present it to dmaengine in an
arch-independent way...

Thoughts?
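
For what it's worth, a minimal sketch of the filter-based variant using
only the existing dmaengine request API; how the channel id reaches the
client (platform data, a DT property, PCI info) is exactly the open
question here, so sirf_dma_filter_id() is purely hypothetical:

#include <linux/dmaengine.h>

/* Match a channel by number; on this controller every channel serves
 * one fixed peripheral, so the number is enough. */
static bool sirf_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	return chan->chan_id == (unsigned int)(uintptr_t)chan_id;
}

static struct dma_chan *client_request_chan(unsigned int id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* dma_request_channel() walks all registered DMACs and hands the
	 * id to the filter, regardless of how the platform provided it. */
	return dma_request_channel(mask, sirf_dma_filter_id,
				   (void *)(uintptr_t)id);
}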

--
~Vinod

-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09  8:18                 ` Barry Song
@ 2011-09-09 16:21                   ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-09 16:21 UTC (permalink / raw)
  To: Barry Song
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Fri, 2011-09-09 at 16:18 +0800, Barry Song wrote:
> 2011/9/9 Vinod Koul <vkoul@infradead.org>:
> > Looks like Jassi got a user for his proposed API. I am not sure whats
> > going on with TI folks, they never showed up here @LPC.
> 
> yes, i can definitely be an user of Jassi's new generic api. it seems
> the api is just there for dma in 2d.
> 
> Jassi prefer to use a transfer type instead of a control command.
> though we will not really change the interleaved setting for every
> transfer(it is more possible for one device, we will not change the
> xlen/ylen/dma_width setting in the whole life period), i do believe
> the transfer type is enough flexible for my possible applications to
> change xlen, ylen and dma_width in different transfers.
> 
> as we know, interleaved DMA can also be used for audio driver. for
> some audio controllers, if there are n channels, it can't tranfer the
> whole audio frame to sound card by one dma channel, and it needs to
> tranfer every channel by separate dma. typically, we can define this
> kind of xfer to skip other n-1 channel.
> 
> Jassi, you might think my reply as an ACK to "[PATCH] DMAEngine:
> Define generic transfer request api".
Great, can you two redo the patch, with this driver as a user of the
new API? :)

-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09  2:52                     ` Barry Song
@ 2011-09-09 16:25                       ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-09 16:25 UTC (permalink / raw)
  To: Barry Song
  Cc: vinod.koul, arnd, linus.walleij, Jassi Brar, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Fri, 2011-09-09 at 10:52 +0800, Barry Song wrote:
> >
> > Actually, there is no real case for xlen > dma_width. this picture is
> > only explaining what will happen if we set xlen > dma_width.
> > when xlen > dma_width, for every line, dma will transfer only
> > dma_width. Then the extra data is wrapped around
> > to the next data line, it will corrupt the DMA transfer for
> > multiple-line 2-D DMA since the extra data will overlap with the real
> > next line.
> >
> > if we set ylen to 0, the DMA becomes 1D, the wrap-around of the extra
> > data has no issue nobody will overlap with it.
> >
> 
> For example, if we set xlen = 7, dma_width =4, ylen > 0
> then dma will happen like:
> 0~6
> 4~10
> 8~14
> 12~18
> ....
Is there a real-world use case for this?

> 
> But for ylen=0, it has no issue. there is no next line to overlap with
> the extra data.
> 
> in case we set xlen = 0, something funny will happen:
> i will copy the same memory again and again(ylen+1 times).


-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09 16:25                       ` Vinod Koul
@ 2011-09-09 23:37                         ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-09 23:37 UTC (permalink / raw)
  To: Vinod Koul
  Cc: vinod.koul, arnd, linus.walleij, Jassi Brar, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

2011/9/10 Vinod Koul <vkoul@infradead.org>:
> On Fri, 2011-09-09 at 10:52 +0800, Barry Song wrote:
>> >
>> > Actually, there is no real case for xlen > dma_width. this picture is
>> > only explaining what will happen if we set xlen > dma_width.
>> > when xlen > dma_width, for every line, dma will transfer only
>> > dma_width. Then the extra data is wrapped around
>> > to the next data line, it will corrupt the DMA transfer for
>> > multiple-line 2-D DMA since the extra data will overlap with the real
>> > next line.
>> >
>> > if we set ylen to 0, the DMA becomes 1D, the wrap-around of the extra
>> > data has no issue nobody will overlap with it.
>> >
>>
>> For example, if we set xlen = 7, dma_width =4, ylen > 0
>> then dma will happen like:
>> 0~6
>> 4~10
>> 8~14
>> 12~18
>> ....
> Is there a real world use case of this??

No real case, actually; the hardware spec says it is wrong to set xlen >
dma_width since it causes the above overlap.

>
>>
>> But for ylen=0, it has no issue. there is no next line to overlap with
>> the extra data.
>>
>> in case we set xlen = 0, something funny will happen:
>> i will copy the same memory again and again(ylen+1 times).
>
>
> --
> ~Vinod Koul
> Intel Corp.
>
>

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09 16:21                   ` Vinod Koul
@ 2011-09-09 23:40                     ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-09 23:40 UTC (permalink / raw)
  To: Vinod Koul
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

2011/9/10 Vinod Koul <vkoul@infradead.org>:
> On Fri, 2011-09-09 at 16:18 +0800, Barry Song wrote:
>> 2011/9/9 Vinod Koul <vkoul@infradead.org>:
>> > Looks like Jassi got a user for his proposed API. I am not sure whats
>> > going on with TI folks, they never showed up here @LPC.
>>
>> yes, i can definitely be an user of Jassi's new generic api. it seems
>> the api is just there for dma in 2d.
>>
>> Jassi prefer to use a transfer type instead of a control command.
>> though we will not really change the interleaved setting for every
>> transfer(it is more possible for one device, we will not change the
>> xlen/ylen/dma_width setting in the whole life period), i do believe
>> the transfer type is enough flexible for my possible applications to
>> change xlen, ylen and dma_width in different transfers.
>>
>> as we know, interleaved DMA can also be used for audio driver. for
>> some audio controllers, if there are n channels, it can't tranfer the
>> whole audio frame to sound card by one dma channel, and it needs to
>> tranfer every channel by separate dma. typically, we can define this
>> kind of xfer to skip other n-1 channel.
>>
>> Jassi, you might think my reply as an ACK to "[PATCH] DMAEngine:
>> Define generic transfer request api".
> Great, can you two redo patch along with this driver as user of new
> API :)

OK, I'll send v2 using this new API.

>
> --
> ~Vinod Koul
> Intel Corp.
>
>
-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09  8:18                 ` Barry Song
@ 2011-09-10  7:33                   ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-10  7:33 UTC (permalink / raw)
  To: Barry Song
  Cc: Vinod Koul, arnd, vinod.koul, linus.walleij, Jassi Brar,
	linux-kernel, workgroup.linux, rongjun.ying, Baohua.Song,
	Williams, Dan J, linux-arm-kernel

On 9 September 2011 13:48, Barry Song <21cnbao@gmail.com> wrote:

> Jassi, you might think my reply as an ACK to "[PATCH] DMAEngine:
> Define generic transfer request api".

Thanks, but could you please formally ACK the patch in its own thread?

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  6:36               ` Barry Song
@ 2011-09-11 15:59                 ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-11 15:59 UTC (permalink / raw)
  To: Barry Song
  Cc: vinod.koul, Jassi Brar, linus.walleij, Williams, Dan J, arnd,
	linux-kernel, workgroup.linux, rongjun.ying, Baohua.Song,
	linux-arm-kernel

On Thu, 2011-09-08 at 14:36 +0800, Barry Song wrote:
> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
> > On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> >> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
> >>
> >>>>> it is much different with primacell based DMA like pl080, pl330.
> >>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
> >>>>> two scales X and Y and direct way to start and stop DMA.
> >>>>> every channel has fixed function to serve only one perpheral. so you
> >>>>> find we have a filter id.
> >>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
> >>>> W and Jassi Brar posted RFC's on?
> >>>
> >>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
> >>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
> >>> system memory can be considered as
> >>> multiple data lines. The length of the data line is determined by the
> >>> user-selected DMA_WIDTH register.
> >>> The user can specify a data window that the user wants to access using
> >>> four parameters:
> >>> ■ Start address
> >>> ■ X length
> >>> ■ Y length
> >>> ■ Width
> >>>
> >>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
> >>>
> >>> If you specifies the Y length as 0 or the X length equals to the DMA
> >>> width, then this 2-D DMA reduces to
> >>> 1-D. If the user configures the X length greater than the DMA width,
> >>> then the extra data is wrapped around
> >>> to the next data line, this may corrupt the DMA transfer for
> >>> multiple-line 2-D DMA. If this is a 1-D DMA, then
> >>> there is no issue. The attached diagram 2d-dma2.png shows the
> >>> wrap-around of the extra data in case the X length
> >>> greater than DMA width.
> >>
> >> Sorry, the role of DMA_WIDTH is not clear to me yet.
> >> In which case the user _must_ set {xlen > width} ?
> >>
> > Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
> > Doesn't xlen and width always start together ? If no, please don't read ahead.
> >
> > According to figures, {xlen > width} is to be set _only_ when a transfer
> > is divided into _exactly_ two chunks separated by gap _exactly_
> > equal to length of the second chunk (an extremely rare case).
> 
> Sorry i didn't list related full information in datasheet in my early reply.
> we don't have the case of xlen > dma_width.
But is it theoretically possible, or just an error case? Looks like it's
the latter, right?

-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-09  8:18                 ` Barry Song
@ 2011-09-11 16:02                   ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-11 16:02 UTC (permalink / raw)
  To: Barry Song
  Cc: vinod.koul, Jassi Brar, arnd, linus.walleij, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Fri, 2011-09-09 at 16:18 +0800, Barry Song wrote:
> 2011/9/9 Vinod Koul <vkoul@infradead.org>:
> > On Thu, 2011-09-08 at 10:55 +0530, Jassi Brar wrote:
> >> On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
> Jassi prefer to use a transfer type instead of a control command.
> though we will not really change the interleaved setting for every
> transfer(it is more possible for one device, we will not change the
> xlen/ylen/dma_width setting in the whole life period), i do believe
> the transfer type is enough flexible for my possible applications to
> change xlen, ylen and dma_width in different transfers.
Is this usually the assumption, or is yours a special case? How about
yours, Jassi?

-- 
~Vinod Koul
Intel Corp.


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08 21:38             ` Vinod Koul
@ 2011-09-11 21:27               ` Linus Walleij
  -1 siblings, 0 replies; 84+ messages in thread
From: Linus Walleij @ 2011-09-11 21:27 UTC (permalink / raw)
  To: Vinod Koul
  Cc: Arnd Bergmann, vinod.koul, Barry Song, linux-kernel,
	workgroup.linux, Rongjun Ying, Barry Song, dan.j.williams,
	linux-arm-kernel

On Thu, Sep 8, 2011 at 11:38 PM, Vinod Koul <vkoul@infradead.org> wrote:
> On Thu, 2011-09-08 at 22:11 +0200, Arnd Bergmann wrote:
>> On Thursday 08 September 2011 20:48:26 Linus Walleij wrote:
>> > 2011/9/8 Arnd Bergmann <arnd@arndb.de>:
>> > > On Thursday 08 September 2011, Barry Song wrote:
>> > >>
>> > >> this filter is used by all drivers with DMA since every dma channel is
>> > >> fixed to be assigned to one device.
>> > >
>> > > Ok, I see now. I think it would be best to introduce a generic
>> > > 'filter by device tree property' function or alternatively an
>> > > dma_of_request_channel function like this:
>> >
>> > You'd have to discuss that with Vinod, the thing is that x86 Atom
>> > systems are using dmaengine for device slave transfers too, and
>> > IIRC these things don't use devicetrees. I may be wrong...
>>
>> Some of them use device tree, some don't.
>>
>> I'm not saying that we have to convert all drivers to use this, but
>> for platforms that always have device tree available, it seems by far
>> the cleanest solution.
> We don't have a very clean solution for filter function in case of slave
> dmaengine. How should the client specify which channel it wants is not
> really clear.
>
> We can look at device tree but that's something which wont work in case
> of non device tree platforms (atom x86).
> What we need is this information of channel mapping, which IMO is
> platform specific and needs to come from platform data, we can abstract
> it actually from the device tree data/PCI/firmware etc but essentially a
> mechanism to publish channels and slaves uniquely and match them in this
> kind of data
>
> Linus W, any progress on that patches you posted??

I haven't written any, and I felt the whole issue was pretty inflamed
too, so I felt bad about it and have avoided thinking about it ever since,
especially as I have no real problem with this in my current setups.

Currently there is a strong coupling between platforms and filter
functions and I can live with it in the systems I use since they have
just one DMAC and need only one filter function per device,
that is specified in platform data for the device. This would likely
also work for the SiRFprimaII if it has only a single DMAC.
The people facing an immediate issue with this are IIRC the
Samsung S5Ps.

If you think this is in need of solving soon and want me to propose
patches for channel mapping to devices using the approach used in
clkdev and regulator APIs to create an attributed mapping table
using struct device * or its string representations, I can
try it out of course, but if it gets flamy I will just back off again.

Thanks,
Linus Walleij

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-11 21:27               ` Linus Walleij
@ 2011-09-12  0:01                 ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-12  0:01 UTC (permalink / raw)
  To: Linus Walleij
  Cc: Vinod Koul, Arnd Bergmann, vinod.koul, linux-kernel,
	workgroup.linux, Rongjun Ying, Barry Song, dan.j.williams,
	linux-arm-kernel

2011/9/12 Linus Walleij <linus.walleij@linaro.org>:
> On Thu, Sep 8, 2011 at 11:38 PM, Vinod Koul <vkoul@infradead.org> wrote:
>> On Thu, 2011-09-08 at 22:11 +0200, Arnd Bergmann wrote:
>>> On Thursday 08 September 2011 20:48:26 Linus Walleij wrote:
>>> > 2011/9/8 Arnd Bergmann <arnd@arndb.de>:
>>> > > On Thursday 08 September 2011, Barry Song wrote:
>>> > >>
>>> > >> this filter is used by all drivers with DMA since every dma channel is
>>> > >> fixed to be assigned to one device.
>>> > >
>>> > > Ok, I see now. I think it would be best to introduce a generic
>>> > > 'filter by device tree property' function or alternatively an
>>> > > dma_of_request_channel function like this:
>>> >
>>> > You'd have to discuss that with Vinod, the thing is that x86 Atom
>>> > systems are using dmaengine for device slave transfers too, and
>>> > IIRC these things don't use devicetrees. I may be wrong...
>>>
>>> Some of them use device tree, some don't.
>>>
>>> I'm not saying that we have to convert all drivers to use this, but
>>> for platforms that always have device tree available, it seems by far
>>> the cleanest solution.
>> We don't have a very clean solution for filter function in case of slave
>> dmaengine. How should the client specify which channel it wants is not
>> really clear.
>>
>> We can look at device tree but that's something which wont work in case
>> of non device tree platforms (atom x86).
>> What we need is this information of channel mapping, which IMO is
>> platform specific and needs to come from platform data, we can abstract
>> it actually from the device tree data/PCI/firmware etc but essentially a
>> mechanism to publish channels and slaves uniquely and match them in this
>> kind of data
>>
>> Linus W, any progress on that patches you posted??
>
> I haven't written any, and I felt the whole issue was pretty inflamed
> too so I felt bad about it and avoided to think about it even since
> I have no real problem with this in my current setups.
>
> Currently there is a strong coupling between platforms and filter
> functions and I can live with it in the systems I use since they have
> just one DMAC and need only one filter function per device,
> that is specified in platform data for the device. This would likely
> also work for the SiRFprimaII if it has only a single DMAC.
> The people facing an immediate issue with this are IIRC the
> Samsung S5Ps.
>
> If you think this is in need of solving soon and want me to propose
> patches for channel mapping to devices using the approach used in
> clkdev and regulator APIs to create an attributed mapping table
> using struct device * or its string representations, I can
> try it out of course, but if it gets flamy I will just back off again.

the attributed mapping table is the kind of thing Linus Torvalds would
call rubbish; people are waiting for the new clkdev framework precisely to
eliminate those long, trivial tables, which are useless code that only
restates hardware details.

if we want a mapping table, we want it in dt. maybe something like the
current gpio handling makes more sense: devices just list their gpio range
in the dts. i guess DMA is just like iomem and gpio, i.e. just another
device resource, so we might describe it just like "reg" and "gpio".

for platforms without dt, writing the dma resource in the mach code and
adding a common filter function that filters on the channel id might be
simple enough.

>
> Thanks,
> Linus Walleij
>
-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-11 15:59                 ` Vinod Koul
@ 2011-09-12  0:13                   ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-12  0:13 UTC (permalink / raw)
  To: Vinod Koul
  Cc: vinod.koul, Jassi Brar, linus.walleij, Williams, Dan J, arnd,
	linux-kernel, workgroup.linux, rongjun.ying, Baohua.Song,
	linux-arm-kernel

2011/9/11 Vinod Koul <vkoul@infradead.org>:
> On Thu, 2011-09-08 at 14:36 +0800, Barry Song wrote:
>> 2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
>> > On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>> >> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>> >>
>> >>>>> it is much different with primacell based DMA like pl080, pl330.
>> >>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>> >>>>> two scales X and Y and direct way to start and stop DMA.
>> >>>>> every channel has fixed function to serve only one perpheral. so you
>> >>>>> find we have a filter id.
>> >>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>> >>>> W and Jassi Brar posted RFC's on?
>> >>>
>> >>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>> >>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>> >>> system memory can be considered as
>> >>> multiple data lines. The length of the data line is determined by the
>> >>> user-selected DMA_WIDTH register.
>> >>> The user can specify a data window that the user wants to access using
>> >>> four parameters:
>> >>> ■ Start address
>> >>> ■ X length
>> >>> ■ Y length
>> >>> ■ Width
>> >>>
>> >>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>> >>>
>> >>> If you specifies the Y length as 0 or the X length equals to the DMA
>> >>> width, then this 2-D DMA reduces to
>> >>> 1-D. If the user configures the X length greater than the DMA width,
>> >>> then the extra data is wrapped around
>> >>> to the next data line, this may corrupt the DMA transfer for
>> >>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>> >>> there is no issue. The attached diagram 2d-dma2.png shows the
>> >>> wrap-around of the extra data in case the X length
>> >>> greater than DMA width.
>> >>
>> >> Sorry, the role of DMA_WIDTH is not clear to me yet.
>> >> In which case the user _must_ set {xlen > width} ?
>> >>
>> > Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
>> > Doesn't xlen and width always start together ? If no, please don't read ahead.
>> >
>> > According to figures, {xlen > width} is to be set _only_ when a transfer
>> > is divided into _exactly_ two chunks separated by gap _exactly_
>> > equal to length of the second chunk (an extremely rare case).
>>
>> Sorry i didn't list related full information in datasheet in my early reply.
>> we don't have the case of xlen > dma_width.
> But is it theoretically possible or just an error case?? Looks like its
> latter, right?

The hardware can do it, since it doesn't require that xlen be less than or
equal to dma_width; but it is a wrong case in practice, as the prima2
datasheet says.

There are cases where the CPU reads the same address repeatedly (because of
the volatile keyword or a memory barrier), for example when polling; that is
done deliberately to control some software execution thread.
But there is no case where DMA should read overlapping addresses within a
single DMA cycle, since we basically can't control when and where the DMA
engine will take the bus; data transferred by DMA is generally not related
to program switching or execution threads.
If we do want the overlap, i.e. the same memory transferred again, it should
happen in another DMA cycle, under explicit instruction from software.
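
A DMAC driver can simply reject that wrong case up front. A minimal sketch,
with hypothetical field names on the channel structure:

    /* sketch only: refuse the wrap-around configuration, since the
     * prima2 datasheet treats xlen > dma_width as an error */
    if (schan->xlen > schan->width)
        return -EINVAL;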

>
> --
> ~Vinod Koul
> Intel Corp.
>
>

-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-11 16:02                   ` Vinod Koul
@ 2011-09-12  6:33                     ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-12  6:33 UTC (permalink / raw)
  To: Vinod Koul
  Cc: Barry Song, arnd, vinod.koul, linus.walleij, Jassi Brar,
	linux-kernel, workgroup.linux, rongjun.ying, Baohua.Song,
	Williams, Dan J, linux-arm-kernel

On 11 September 2011 21:32, Vinod Koul <vkoul@infradead.org> wrote:
> On Fri, 2011-09-09 at 16:18 +0800, Barry Song wrote:

>> Jassi prefer to use a transfer type instead of a control command.
>> though we will not really change the interleaved setting for every
>> transfer(it is more possible for one device, we will not change the
>> xlen/ylen/dma_width setting in the whole life period), i do believe
>> the transfer type is enough flexible for my possible applications to
>> change xlen, ylen and dma_width in different transfers.

> Is this usually the assumption or yours is a special case, how about
> your's Jassi?

1) Having type per transfer is more flexible than having to set the type
     for a channel using a control command. The overhead is negligible
     because the client reuses the same descriptors with only changed
     source/destination addresses.
2) DMA_SLAVE_CONFIG is meant for slave (Mem<->Dev) channels,
    whereas it is very likely (for multimedia drivers) that such operations
    will be needed Mem->Mem as well.
3) Someday if people realize we can fold many, if not all, transfer types into
    one, this api has the potential to be the survivor.

That was where my mind was grazing when I chose to do what I did.

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-11 21:27               ` Linus Walleij
@ 2011-09-14  4:54                 ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-14  4:54 UTC (permalink / raw)
  To: Linus Walleij
  Cc: Arnd Bergmann, Barry Song, linux-kernel, workgroup.linux,
	Rongjun Ying, Barry Song, dan.j.williams, linux-arm-kernel

On Sun, 2011-09-11 at 23:27 +0200, Linus Walleij wrote:
> On Thu, Sep 8, 2011 at 11:38 PM, Vinod Koul <vkoul@infradead.org> wrote:
> >
> > Linus W, any progress on that patches you posted??
> 
> I haven't written any, and I felt the whole issue was pretty inflamed
> too so I felt bad about it and avoided to think about it even since
> I have no real problem with this in my current setups.
> 
> Currently there is a strong coupling between platforms and filter
> functions and I can live with it in the systems I use since they have
> just one DMAC and need only one filter function per device,
> that is specified in platform data for the device. This would likely
> also work for the SiRFprimaII if it has only a single DMAC.
> The people facing an immediate issue with this are IIRC the
> Samsung S5Ps.
> 
> If you think this is in need of solving soon and want me to propose
> patches for channel mapping to devices using the approach used in
> clkdev and regulator APIs to create an attributed mapping table
> using struct device * or its string representations, I can
> try it out of course, but if it gets flamy I will just back off again.
Please try, at my request. I really would like this problem to be solved.

What we want is an API that solves the filter-function issue (we should be
able to do away with filter functions completely once proper mapping is in
place), with a scheme that is simple to understand/code/maintain and
platform independent.


-- 
~Vinod


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-12  6:33                     ` Jassi Brar
@ 2011-09-14  5:07                       ` Vinod Koul
  -1 siblings, 0 replies; 84+ messages in thread
From: Vinod Koul @ 2011-09-14  5:07 UTC (permalink / raw)
  To: Jassi Brar
  Cc: Barry Song, arnd, linus.walleij, Jassi Brar, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, Williams, Dan J,
	linux-arm-kernel

On Mon, 2011-09-12 at 12:03 +0530, Jassi Brar wrote:
> On 11 September 2011 21:32, Vinod Koul <vkoul@infradead.org> wrote:
> > On Fri, 2011-09-09 at 16:18 +0800, Barry Song wrote:
> 
> >> Jassi prefer to use a transfer type instead of a control command.
> >> though we will not really change the interleaved setting for every
> >> transfer(it is more possible for one device, we will not change the
> >> xlen/ylen/dma_width setting in the whole life period), i do believe
> >> the transfer type is enough flexible for my possible applications to
> >> change xlen, ylen and dma_width in different transfers.
> 
> > Is this usually the assumption or yours is a special case, how about
> > your's Jassi?
> 
> 1) Having type per transfer is more flexible than having to set the type
>      for a channel using a control command. The overhead is negligible
>      because the client reuses the same descriptors with only changed
>      source/destination addresses.
> 2) DMA_SLAVE_CONFIG is meant for slave (Mem<->Dev) channels,
>     whereas it is very likely(for multimedia drives) to have such operations
>     Mem->Mem as well.
> 3) Someday if people realize we can fold many, if not all, transfer types into
>     one, this api has the potential to be the survivor.
> 
> That was where my mind was grazing when I chose to do what I did.
I would agree that it makes sense to have it in the API rather than in the config.


-- 
~Vinod


^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-08  5:25             ` Jassi Brar
@ 2011-09-14  6:46               ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-14  6:46 UTC (permalink / raw)
  To: Jassi Brar
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

Hi Jassi,

2011/9/8 Jassi Brar <jassisinghbrar@gmail.com>:
> On Thu, Sep 8, 2011 at 8:47 AM, Jassi Brar <jassisinghbrar@gmail.com> wrote:
>> On Thu, Sep 8, 2011 at 7:42 AM, Barry Song <21cnbao@gmail.com> wrote:
>>
>>>>> it is much different with primacell based DMA like pl080, pl330.
>>>>> prima2 has a self-defined DMAC IP. basically it is a 2D mode dma with
>>>>> two scales X and Y and direct way to start and stop DMA.
>>>>> every channel has fixed function to serve only one perpheral. so you
>>>>> find we have a filter id.
>>>> okay, what do you mean by 2D mode? Is it similar to what TI folks, Linus
>>>> W and Jassi Brar posted RFC's on?
>>>
>>> In SiRFprimaII 2-D DMA, the system memory space is interpreted
>>> as a 2-D layout instead of a linear 1-D layout. More specifically, the
>>> system memory can be considered as
>>> multiple data lines. The length of the data line is determined by the
>>> user-selected DMA_WIDTH register.
>>> The user can specify a data window that the user wants to access using
>>> four parameters:
>>> ■ Start address
>>> ■ X length
>>> ■ Y length
>>> ■ Width
>>>
>>> The idea of a 2-D DMA is shown in figure 2d-dma.png attached.
>>>
>>> If you specifies the Y length as 0 or the X length equals to the DMA
>>> width, then this 2-D DMA reduces to
>>> 1-D. If the user configures the X length greater than the DMA width,
>>> then the extra data is wrapped around
>>> to the next data line, this may corrupt the DMA transfer for
>>> multiple-line 2-D DMA. If this is a 1-D DMA, then
>>> there is no issue. The attached diagram 2d-dma2.png shows the
>>> wrap-around of the extra data in case the X length
>>> greater than DMA width.
>>
>> Sorry, the role of DMA_WIDTH is not clear to me yet.
>> In which case the user _must_ set {xlen > width} ?
>>
> Perhaps 2d-dma.PNG is inaccurate - it shouldn't depict any deltaX.
> Doesn't xlen and width always start together ? If no, please don't read ahead.
>
> According to figures, {xlen > width} is to be set _only_ when a transfer
> is divided into _exactly_ two chunks separated by gap _exactly_
> equal to length of the second chunk (an extremely rare case).
>
> Anyways, every case can be easily expressed using the generic api
> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
>
> Roughly speaking, the following should be done...
> Client driver :-
> **************
> For a 'Rectangular' transfer (2d-dma.PNG) :-
>      xfer_template.numf = Ylen;  /* height of rectangle */
>      xfer_template.frame_size = 1;
>      xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);

For prima2:

xfer_template.numf = ylen + 1
xfer_template.frame_size = 1;
xfer_template.sgl[0].size = xlen;
xfer_template.sgl[0].icg = dma_width - xlen;

>
> For the "A Line and some" transfer (2d-dma2.PNG) :-
>      xfer_template.numf = 1;
>      xfer_template.frame_size = 2;
>      xfer_template.sgl[0].size = xlen1; /* a line */
>      xfer_template.sgl[1].size = xlen2;  /* and some */
>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>      xfer_template.sgl[1].icg = 0; /* doesn't matter */
>
> DMAC driver :-
> ***************
>      if (xfer_template.frame_size == 1) {
>           /* rectangle */
>           schan->xlen = xfer_template.sgl[0].size;
>           schan->width = schan->xlen + xfer_template.sgl[0].icg;
>      } else if (xfer_template.frame_size == 2 &&
>                  xfer_template.numf == 1 &&
>                  xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>           /* a line and some */
>           schan->xlen = xfer_template.sgl[0].size +  xfer_template.sgl[1].size.
>           schan->width = xfer_template.sgl[0].size;
>      } else {
>           /* _Hardware_ doesn't support the transfer as such. *
>           return -EINVAL;
>      }
>      schan->ylen = xfer_template.numf /* -1? */;
>

For prima2:
xfer_template.frame_size is always 1, then
schan->xlen = xfer_template.sgl[0].size;
schan->width = schan->xlen + xfer_template.sgl[0].icg;
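
As a concrete, hypothetical example of the client side, with made-up
numbers (a window of 16 lines, 512 bytes used per line, inside lines that
are 2048 bytes wide) and in the notation of the proposed struct
xfer_template from the RFC linked above, the client would fill in roughly:

    xfer_template.numf        = 16;          /* number of lines in the window  */
    xfer_template.frame_size  = 1;           /* one chunk per line (rectangle) */
    xfer_template.sgl[0].size = 512;         /* bytes transferred per line     */
    xfer_template.sgl[0].icg  = 2048 - 512;  /* gap up to the next line        */

The DMAC driver then recovers xlen = 512 and dma_width = 2048 exactly as in
the mapping above.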

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-14  6:46               ` Barry Song
@ 2011-09-14  7:11                 ` Jassi Brar
  -1 siblings, 0 replies; 84+ messages in thread
From: Jassi Brar @ 2011-09-14  7:11 UTC (permalink / raw)
  To: Barry Song
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

On Wed, Sep 14, 2011 at 12:16 PM, Barry Song <21cnbao@gmail.com> wrote:

>> Anyways, every case can be easily expressed using the generic api
>> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
>>
>> Roughly speaking, the following should be done...
>> Client driver :-
>> **************
>> For a 'Rectangular' transfer (2d-dma.PNG) :-
>>      xfer_template.numf = Ylen;  /* height of rectangle */
>>      xfer_template.frame_size = 1;
>>      xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>
> For prima2:
>
> xfer_template.numf = ylen + 1
> xfer_template.frame_size = 1;
> xfer_template.sgl[0].size = xlen;
> xfer_template.sgl[0].icg = dma_width - xlen;
>
All is the same as I suggested except for 'numf'.
You might want to keep 'numf' the same as well, because the client
driver shouldn't need to know that the DMAC's register expects a "+1" value.
Remember, the client is supposed to be reusable over other DMACs as well.
So, rather, in the DMAC driver please do
    schan->ylen = xfer_template.numf + 1;


>> DMAC driver :-
>> ***************
>>      if (xfer_template.frame_size == 1) {
>>           /* rectangle */
>>           schan->xlen = xfer_template.sgl[0].size;
>>           schan->width = schan->xlen + xfer_template.sgl[0].icg;
>>      } else if (xfer_template.frame_size == 2 &&
>>                  xfer_template.numf == 1 &&
>>                  xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>>           /* a line and some */
>>           schan->xlen = xfer_template.sgl[0].size +  xfer_template.sgl[1].size.
>>           schan->width = xfer_template.sgl[0].size;
>>      } else {
>>           /* _Hardware_ doesn't support the transfer as such. *
>>           return -EINVAL;
>>      }
>>      schan->ylen = xfer_template.numf /* -1? */;
>>
>
> For prima2:
> xfer_template.frame_size is always 1, then
> schan->xlen = xfer_template.sgl[0].size;
> schan->width = schan->xlen + xfer_template.sgl[0].icg;
>
Ok, you don't need the 'else if' clause because as you said
{xlen > width} is not an option.
But you do need the other two checks, so that the DMAC driver
cleanly rejects any interleaved-format that it doesn't support.

Point being, the clients and DMAC drivers are supposed to be
very 'promiscuous' - any Client could be 'riding' any DMAC ;)
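
Putting the two mails together, the prima2 side of the prep path would then
look roughly like the following sketch; the helper name and fields are
hypothetical and the real driver may well differ:

    /* sketch only: map the proposed xfer_template onto the prima2 channel,
     * keeping the hardware-specific adjustment out of the client */
    static int sirfsoc_dma_fill_2d(struct sirfsoc_dma_chan *schan,
                                   struct xfer_template *xt)
    {
        /* prima2 only supports the plain rectangular case */
        if (xt->frame_size != 1)
            return -EINVAL;

        schan->xlen  = xt->sgl[0].size;
        schan->width = xt->sgl[0].size + xt->sgl[0].icg;  /* full line length */
        /* assuming the YLEN register holds (number of lines - 1),
         * as the quoted client-side mapping above implies */
        schan->ylen  = xt->numf - 1;

        return 0;
    }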

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-14  7:11                 ` Jassi Brar
@ 2011-09-14  9:49                   ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-14  9:49 UTC (permalink / raw)
  To: Jassi Brar
  Cc: Koul, Vinod, linus.walleij, Williams, Dan J, arnd, linux-kernel,
	workgroup.linux, rongjun.ying, Baohua.Song, linux-arm-kernel

2011/9/14 Jassi Brar <jassisinghbrar@gmail.com>:
> On Wed, Sep 14, 2011 at 12:16 PM, Barry Song <21cnbao@gmail.com> wrote:
>
>>> Anyways, every case can be easily expressed using the generic api
>>> I proposed. See 'struct xfer_template' in https://lkml.org/lkml/2011/8/12/128
>>>
>>> Roughly speaking, the following should be done...
>>> Client driver :-
>>> **************
>>> For a 'Rectangular' transfer (2d-dma.PNG) :-
>>>      xfer_template.numf = Ylen;  /* height of rectangle */
>>>      xfer_template.frame_size = 1;
>>>      xfer_template.sgl[0].size = Xlen; /* width of rectangle */
>>>      xfer_template.sgl[0].icg = start_addr_Y(n) - end_addr_Y(n-1);
>>
>> For prima2:
>>
>> xfer_template.numf = ylen + 1
>> xfer_template.frame_size = 1;
>> xfer_template.sgl[0].size = xlen;
>> xfer_template.sgl[0].icg = dma_width - xlen;
>>
> All is same as I suggested except for 'numf'.
> You might want to keep 'numf' same as well. Because the client
> driver shouldn't need to know that the DMAC's register expect "+1" value.
> Remember the client is supposed to be reusable over other DMACs as well.
> So, rather in the DMAC driver, please do
>    schan->ylen = xfer_template.numf + 1;

Clients should not know anything like xlen, ylen or dma_width, which are
all things the dmac driver cares about; clients only need to know the
generic xfer. Here I am just listing the relationship between the client
and the dmac driver to get confirmation from you and make sure I haven't
misunderstood your API :-)

>
>
>>> DMAC driver :-
>>> ***************
>>>      if (xfer_template.frame_size == 1) {
>>>           /* rectangle */
>>>           schan->xlen = xfer_template.sgl[0].size;
>>>           schan->width = schan->xlen + xfer_template.sgl[0].icg;
>>>      } else if (xfer_template.frame_size == 2 &&
>>>                  xfer_template.numf == 1 &&
>>>                  xfer_template.sgl[1].size == xfer_template.sgl[0].icg){
>>>           /* a line and some */
>>>           schan->xlen = xfer_template.sgl[0].size +  xfer_template.sgl[1].size.
>>>           schan->width = xfer_template.sgl[0].size;
>>>      } else {
>>>           /* _Hardware_ doesn't support the transfer as such. *
>>>           return -EINVAL;
>>>      }
>>>      schan->ylen = xfer_template.numf /* -1? */;
>>>
>>
>> For prima2:
>> xfer_template.frame_size is always 1, then
>> schan->xlen = xfer_template.sgl[0].size;
>> schan->width = schan->xlen + xfer_template.sgl[0].icg;
>>
> Ok, you don't need the 'else if' clause because as you said
> {xlen > width} is not an option.
> But you do need the other two checks, so that the DMAC driver
> cleanly rejects any interleaved-format that it doesn't support.
>
> Point being, the clients and DMAC drivers are supposed to be
> very 'promiscuous' - any Client could be 'riding' any DMAC ;)

yes. of course.

>

-barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

* Re: [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver
  2011-09-07 16:46     ` Barry Song
@ 2011-09-16  9:06       ` Barry Song
  -1 siblings, 0 replies; 84+ messages in thread
From: Barry Song @ 2011-09-16  9:06 UTC (permalink / raw)
  To: Koul, Vinod
  Cc: Baohua.Song, arnd, linux-kernel, workgroup.linux, rongjun.ying,
	Williams, Dan J, linux-arm-kernel

>>> +/* Interrupt handler */
>>> +static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
>>> +{
>>> +     struct sirfsoc_dma *sdma = data;
>>> +     struct sirfsoc_dma_chan *schan;
>>> +     u32 is;
>>> +     int ch;
>>> +
>>> +     is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
>>> +     while ((ch = fls(is) - 1) >= 0) {
>>> +             is &= ~(1 << ch);
>>> +             writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
>>> +             schan = &sdma->channels[ch];
>>> +
>>> +             spin_lock(&schan->lock);
>>> +
>>> +             /* Execute queued descriptors */
>>> +             list_splice_tail_init(&schan->active, &schan->completed);
>>> +             if (!list_empty(&schan->queued))
>>> +                     sirfsoc_dma_execute(schan);
>>> +
>>> +             spin_unlock(&schan->lock);
>>> +     }
>> Here you know which channel has triggered the interrupt, and you may
>> pass this info to your tasklet and avoid scanning again there
>
> ok. let me see.

We do know which channels have triggered in the IRQ handler, but there
is no clean way to hand that information to the tasklet. If we put a
flag in the schan data:
1. more than one channel can trigger, so the tasklet still needs a
   for(...) loop over the channels;
2. we would need a lock between the IRQ and the tasklet, otherwise a
   new IRQ arriving while the tasklet runs might change the flag.

So I think the current way is better.
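
[A sketch of the flag-based alternative being weighed here, to make the
trade-off concrete; the 'unsigned long pending' member and both helper
functions are hypothetical and not part of this patch.]

static irqreturn_t example_irq(int irq, void *data)
{
        struct sirfsoc_dma *sdma = data;
        u32 is = readl_relaxed(sdma->regs + SIRFSOC_DMA_CH_INT);
        int ch;

        while ((ch = fls(is) - 1) >= 0) {
                is &= ~(1 << ch);
                writel_relaxed(1 << ch, sdma->regs + SIRFSOC_DMA_CH_INT);
                /* record the channel; a real handler would still have
                 * to splice active -> completed and kick queued work */
                set_bit(ch, &sdma->pending);
        }
        tasklet_schedule(&sdma->tasklet);
        return IRQ_HANDLED;
}

static void example_tasklet(unsigned long arg)
{
        struct sirfsoc_dma *sdma = (struct sirfsoc_dma *)arg;
        int ch;

        /* still a per-channel loop, and test_and_clear_bit() is still
         * needed so bits set by a new IRQ are neither lost nor handled
         * twice, which is exactly the point made above */
        for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++)
                if (test_and_clear_bit(ch, &sdma->pending))
                        ; /* process completed descriptors for channel ch */
}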

>
>>
>>> +
>>> +     /* Schedule tasklet */
>>> +     tasklet_schedule(&sdma->tasklet);
>>> +
>>> +     return IRQ_HANDLED;
>>> +}
>>> +
>>> +/* process completed descriptors */
>>> +static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
>>> +{
>>> +     dma_cookie_t last_cookie = 0;
>>> +     struct sirfsoc_dma_chan *schan;
>>> +     struct sirfsoc_dma_desc *mdesc;
>>> +     struct dma_async_tx_descriptor *desc;
>>> +     unsigned long flags;
>>> +     LIST_HEAD(list);
>>> +     int i;
>>> +
>>> +     for (i = 0; i < sdma->dma.chancnt; i++) {
>>> +             schan = &sdma->channels[i];
>>> +
>>> +             /* Get all completed descriptors */
>>> +             spin_lock_irqsave(&schan->lock, flags);
>> this will block interrupts, I don't see a reason why this should be
>> used here??
>
> ok. no irq is accessing completed list.

Sorry, after reading more carefully we actually do need this lock: the
IRQ handler moves finished descriptors from the active list onto the
completed list, so the completed list has to be protected against the
IRQ.
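
[A stripped-down sketch of the two sides of that lock; the helper
functions are not in the patch, and the local-list harvest on the
tasklet side is an assumption based on the LIST_HEAD(list) in the code
quoted above.]

/* Hard-IRQ side: interrupts are already off, plain spin_lock is enough. */
static void irq_side(struct sirfsoc_dma_chan *schan)
{
        spin_lock(&schan->lock);
        list_splice_tail_init(&schan->active, &schan->completed);
        spin_unlock(&schan->lock);
}

/* Tasklet side: runs with interrupts enabled, so it must disable them
 * while holding the lock; otherwise the DMA IRQ could fire on the same
 * CPU and spin forever on schan->lock. */
static void tasklet_side(struct sirfsoc_dma_chan *schan,
                         struct list_head *harvest)
{
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);
        list_splice_tail_init(&schan->completed, harvest);
        spin_unlock_irqrestore(&schan->lock, flags);
}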

I have fixed the other issues, switched to Jassi's v1 generic-xfer
patch, and will send v2.

Thanks
barry

^ permalink raw reply	[flat|nested] 84+ messages in thread

end of thread, other threads:[~2011-09-16  9:06 UTC | newest]

Thread overview: 84+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-09-07  5:41 [PATCH] dmaengine: add CSR SiRFprimaII DMAC driver Barry Song
2011-09-07  5:41 ` Barry Song
2011-09-07 16:14 ` Koul, Vinod
2011-09-07 16:14   ` Koul, Vinod
2011-09-07 16:46   ` Barry Song
2011-09-07 16:46     ` Barry Song
2011-09-07 18:09     ` Koul, Vinod
2011-09-07 18:09       ` Koul, Vinod
2011-09-08  2:12       ` Barry Song
2011-09-08  3:17         ` Jassi Brar
2011-09-08  3:17           ` Jassi Brar
2011-09-08  5:25           ` Jassi Brar
2011-09-08  5:25             ` Jassi Brar
2011-09-08  6:36             ` Barry Song
2011-09-08  6:36               ` Barry Song
2011-09-08  7:49               ` Jassi Brar
2011-09-08  7:49                 ` Jassi Brar
2011-09-08 21:51               ` Vinod Koul
2011-09-08 21:51                 ` Vinod Koul
2011-09-09  2:35                 ` Barry Song
2011-09-09  2:35                   ` Barry Song
2011-09-09  2:52                   ` Barry Song
2011-09-09  2:52                     ` Barry Song
2011-09-09 16:25                     ` Vinod Koul
2011-09-09 16:25                       ` Vinod Koul
2011-09-09 23:37                       ` Barry Song
2011-09-09 23:37                         ` Barry Song
2011-09-11 15:59               ` Vinod Koul
2011-09-11 15:59                 ` Vinod Koul
2011-09-12  0:13                 ` Barry Song
2011-09-12  0:13                   ` Barry Song
2011-09-08 21:46             ` Vinod Koul
2011-09-08 21:46               ` Vinod Koul
2011-09-09  8:18               ` Barry Song
2011-09-09  8:18                 ` Barry Song
2011-09-09 16:21                 ` Vinod Koul
2011-09-09 16:21                   ` Vinod Koul
2011-09-09 23:40                   ` Barry Song
2011-09-09 23:40                     ` Barry Song
2011-09-10  7:33                 ` Jassi Brar
2011-09-10  7:33                   ` Jassi Brar
2011-09-11 16:02                 ` Vinod Koul
2011-09-11 16:02                   ` Vinod Koul
2011-09-12  6:33                   ` Jassi Brar
2011-09-12  6:33                     ` Jassi Brar
2011-09-14  5:07                     ` Vinod Koul
2011-09-14  5:07                       ` Vinod Koul
2011-09-14  6:46             ` Barry Song
2011-09-14  6:46               ` Barry Song
2011-09-14  7:11               ` Jassi Brar
2011-09-14  7:11                 ` Jassi Brar
2011-09-14  9:49                 ` Barry Song
2011-09-14  9:49                   ` Barry Song
2011-09-08  6:14           ` Barry Song
2011-09-08  6:14             ` Barry Song
2011-09-08  6:37             ` Jassi Brar
2011-09-08  6:37               ` Jassi Brar
2011-09-08  2:18       ` Barry Song
2011-09-08  2:18         ` Barry Song
2011-09-16  9:06     ` Barry Song
2011-09-16  9:06       ` Barry Song
2011-09-07 19:27   ` Linus Walleij
2011-09-07 19:27     ` Linus Walleij
2011-09-08  1:47     ` Barry Song
2011-09-08  1:47       ` Barry Song
2011-09-08 14:52 ` Arnd Bergmann
2011-09-08 14:52   ` Arnd Bergmann
2011-09-08 15:27   ` Barry Song
2011-09-08 15:27     ` Barry Song
2011-09-08 16:19     ` Arnd Bergmann
2011-09-08 16:19       ` Arnd Bergmann
2011-09-08 18:48       ` Linus Walleij
2011-09-08 20:11         ` Arnd Bergmann
2011-09-08 20:11           ` Arnd Bergmann
2011-09-08 21:38           ` Vinod Koul
2011-09-08 21:38             ` Vinod Koul
2011-09-11 21:27             ` Linus Walleij
2011-09-11 21:27               ` Linus Walleij
2011-09-12  0:01               ` Barry Song
2011-09-12  0:01                 ` Barry Song
2011-09-14  4:54               ` Vinod Koul
2011-09-14  4:54                 ` Vinod Koul
2011-09-09 16:10           ` Vinod Koul
2011-09-09 16:10             ` Vinod Koul
