* [PATCHv7] dmaengine: Add support for BCM2835
@ 2013-11-17 15:39 Florian Meier
  2013-11-17 16:02 ` Joe Perches
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Florian Meier @ 2013-11-17 15:39 UTC (permalink / raw)
  To: Stephen Warren, Vinod Koul, Dan Williams, Russell King - ARM Linux
  Cc: devicetree, alsa-devel, Liam Girdwood, linux-kernel, Mark Brown,
	linux-rpi-kernel, dmaengine, linux-arm-kernel

Add support for DMA controller of BCM2835 as used in the Raspberry Pi.
Currently it only supports cyclic DMA.

Signed-off-by: Florian Meier <florian.meier@koalo.de>
---

This version includes some more style improvements
suggested in the previous thread.

 .../devicetree/bindings/dma/bcm2835-dma.txt        |   56 ++
 drivers/dma/Kconfig                                |    6 +
 drivers/dma/Makefile                               |    1 +
 drivers/dma/bcm2835-dma.c                          |  736 ++++++++++++++++++++
 4 files changed, 799 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/bcm2835-dma.txt
 create mode 100644 drivers/dma/bcm2835-dma.c

diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
new file mode 100644
index 0000000..7d91019
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
@@ -0,0 +1,56 @@
+* BCM2835 DMA controller
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the DMA interrupts associated
+		to the DMA channels in ascending order.
+		First cell is the IRQ bank.
+		Second cell is the IRQ number.
+- #dma-cells: Must be <1>, used to represent the number of integer cells in
+		the dmas property of client devices.
+- brcm,dma-channel-mask: Bit mask representing the channels
+			 not used by the firmware.
+
+Example:
+
+dma: dma@7e007000 {
+	compatible = "brcm,bcm2835-dma";
+	reg = <0x7e007000 0xf00>;
+	interrupts = <1 16
+		      1 17
+		      1 18
+		      1 19
+		      1 20
+		      1 21
+		      1 22
+		      1 23
+		      1 24
+		      1 25
+		      1 26
+		      1 27
+		      1 28>;
+
+	#dma-cells = <1>;
+	brcm,dma-channel-mask = <0x7f35>;
+};
+
+DMA clients connected to the BCM2835 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. The DREQ number.
+
+Example:
+
+bcm2835_i2s: i2s@7e203000 {
+	compatible = "brcm,bcm2835-i2s";
+	reg = <	0x7e203000 0x20
+		0x7e101098 0x02>;
+
+	dmas = <&dma 2
+		&dma 3>;
+	dma-names = "tx", "rx";
+};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c61a6ec..880e723 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -300,6 +300,12 @@ config DMA_OMAP
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config TI_CPPI41
 	tristate "AM33xx CPPI41 DMA support"
 	depends on ARCH_OMAP
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da9..0a6f08e 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644
index 0000000..4b4c673
--- /dev/null
+++ b/drivers/dma/bcm2835-dma.c
@@ -0,0 +1,736 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author:      Florian Meier <florian.meier@koalo.de>
+ *              Copyright 2013
+ *
+ * Based on
+ *	OMAP DMAengine support by Russell King
+ *
+ *	BCM2708 DMA Driver
+ *	Copyright (C) 2010 Broadcom
+ *
+ *	Raspberry Pi PCM I2S ALSA Driver
+ *	Copyright (c) by Phil Poole 2013
+ *
+ *	MARVELL MMP Peripheral DMA Driver
+ *	Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+	struct dma_device ddev;
+	spinlock_t lock;
+	void __iomem *base;
+	struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+	uint32_t info;
+	uint32_t src;
+	uint32_t dst;
+	uint32_t length;
+	uint32_t stride;
+	uint32_t next;
+	uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+	struct virt_dma_chan vc;
+	struct list_head node;
+
+	struct dma_slave_config	cfg;
+	bool cyclic;
+	unsigned dreq;
+
+	int ch;
+	struct bcm2835_desc *desc;
+
+	void __iomem *chan_base;
+	int irq_number;
+};
+
+struct bcm2835_desc {
+	struct virt_dma_desc vd;
+	enum dma_transfer_direction dir;
+
+	unsigned int control_block_size;
+	struct bcm2835_dma_cb *control_block_base;
+	dma_addr_t control_block_base_phys;
+
+	unsigned frames;
+	size_t size;
+};
+
+#define BCM2835_DMA_CS		0x00
+#define BCM2835_DMA_ADDR	0x04
+#define BCM2835_DMA_SOURCE_AD	0x0c
+#define BCM2835_DMA_DEST_AD	0x10
+#define BCM2835_DMA_NEXTCB	0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE	BIT(0)
+#define BCM2835_DMA_INT	BIT(2)
+#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR	BIT(8)
+#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN	BIT(0)
+#define BCM2835_DMA_D_INC	BIT(4)
+#define BCM2835_DMA_D_DREQ	BIT(6)
+#define BCM2835_DMA_S_INC	BIT(8)
+#define BCM2835_DMA_S_DREQ	BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x)	((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8	1
+#define BCM2835_DMA_DATA_TYPE_S16	2
+#define BCM2835_DMA_DATA_TYPE_S32	4
+#define BCM2835_DMA_DATA_TYPE_S128	16
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+	return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+		struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+	dma_free_coherent(desc->vd.tx.chan->device->dev,
+			desc->control_block_size,
+			desc->control_block_base,
+			desc->control_block_base_phys);
+	kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+	unsigned long int cs;
+	long int timeout = 10000;
+
+	cs = readl(chan_base + BCM2835_DMA_CS);
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Write 0 to the active bit - Pause the DMA */
+	writel(0, chan_base + BCM2835_DMA_CS);
+
+	/* Wait for any current AXI transfer to complete */
+	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout >= 0)
+		cs = readl(chan_base + BCM2835_DMA_CS);
+
+	/* We'll un-pause when we set of our next DMA */
+	if (cs & BCM2835_DMA_ISPAUSED)
+		return -ETIMEDOUT;
+
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Terminate the control block chain */
+	writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+	/* Abort the whole DMA */
+	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+	       chan_base + BCM2835_DMA_CS);
+
+	return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+	struct bcm2835_desc *d;
+
+	if (!vd) {
+		c->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+	dsb();	/* ARM data synchronization (push) operation */
+
+	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+	struct bcm2835_chan *c = data;
+	struct bcm2835_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Acknowledge interrupt */
+	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+	d = c->desc;
+
+	if (d) {
+		/* TODO Only works for cyclic DMA */
+		vchan_cyclic_callback(&d->vd);
+	}
+
+	/* Keep the DMA engine running */
+	dsb(); /* ARM synchronization barrier */
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	dev_dbg(c->vc.chan.device->dev,
+			"Allocating DMA channel %i\n", c->ch);
+
+	return request_irq(c->irq_number,
+			bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	vchan_free_chan_resources(&c->vc);
+	free_irq(c->irq_number, c);
+
+	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+	return d->size;
+}
+
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+	unsigned i;
+	size_t size;
+
+	for (size = i = 0; i < d->frames; i++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[i];
+		size_t this_size = control_block->length;
+		dma_addr_t dma;
+
+		if (d->dir == DMA_DEV_TO_MEM)
+			dma = control_block->dst;
+		else
+			dma = control_block->src;
+
+		if (size)
+			size += this_size;
+		else if (addr >= dma && addr < dma + this_size)
+			size += dma + this_size - addr;
+	}
+
+	return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		txstate->residue =
+			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+		struct bcm2835_desc *d = c->desc;
+		dma_addr_t pos;
+
+		if (d->dir == DMA_MEM_TO_DEV)
+			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+		else if (d->dir == DMA_DEV_TO_MEM)
+			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+		else
+			pos = 0;
+
+		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+	} else {
+		txstate->residue = 0;
+	}
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	unsigned long flags;
+
+	c->cyclic = true; /* Nothing else is implemented */
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc) && !c->desc)
+		bcm2835_dma_start_desc(c);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct bcm2835_desc *d;
+	dma_addr_t dev_addr;
+	unsigned es, sync_type;
+	unsigned frame;
+
+	/* Grab configuration */
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		sync_type = BCM2835_DMA_S_DREQ;
+	} else if (direction == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		sync_type = BCM2835_DMA_D_DREQ;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = BCM2835_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->dir = direction;
+	d->frames = buf_len / period_len;
+
+	/* Allocate memory for control blocks */
+	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+			d->control_block_size, &d->control_block_base_phys,
+			GFP_NOWAIT);
+
+	if (!d->control_block_base) {
+		kfree(d);
+		return NULL;
+	}
+
+	/*
+	 * Iterate over all frames, create a control block
+	 * for each frame and link them together.
+	 */
+	for (frame = 0; frame < d->frames; frame++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[frame];
+
+		/* Setup addresses */
+		if (d->dir == DMA_DEV_TO_MEM) {
+			control_block->info = BCM2835_DMA_D_INC;
+			control_block->src = dev_addr;
+			control_block->dst = buf_addr + frame * period_len;
+		} else {
+			control_block->info = BCM2835_DMA_S_INC;
+			control_block->src = buf_addr + frame * period_len;
+			control_block->dst = dev_addr;
+		}
+
+		/* Enable interrupt */
+		control_block->info |= BCM2835_DMA_INT_EN;
+
+		/* Setup synchronization */
+		if (sync_type != 0)
+			control_block->info |= sync_type;
+
+		/* Setup DREQ channel */
+		if (c->dreq != 0)
+			control_block->info |=
+				BCM2835_DMA_PER_MAP(c->dreq);
+
+		/* Length of a frame */
+		control_block->length = period_len;
+		d->size += control_block->length;
+
+		/*
+		 * Next block is the next frame.
+		 * This DMA engine driver currently only supports cyclic DMA.
+		 * Therefore, wrap around at number of frames.
+		 */
+		control_block->next = d->control_block_base_phys +
+			sizeof(struct bcm2835_dma_cb)
+			* ((frame + 1) % d->frames);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+		struct dma_slave_config *cfg)
+{
+	if ((cfg->direction == DMA_DEV_TO_MEM &&
+	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    (cfg->direction == DMA_MEM_TO_DEV &&
+	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    !is_slave_direction(cfg->direction)) {
+		return -EINVAL;
+	}
+
+	c->cfg = *cfg;
+
+	return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+	unsigned long flags;
+	int timeout = 1000;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after bcm_dma_abort() returns (even if it does, it will see
+	 * c->desc is NULL and exit.)
+	 */
+	if (c->desc) {
+		c->desc = NULL;
+		bcm2835_dma_abort(c->chan_base);
+
+		/* Wait for stopping */
+		while (timeout > 0) {
+			timeout--;
+			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+						BCM2835_DMA_ACTIVE))
+				break;
+
+			cpu_relax();
+		}
+
+		if (timeout <= 0)
+			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+	}
+
+	vchan_get_all_descriptors(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	int ret;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return bcm2835_dma_slave_config(c,
+				(struct dma_slave_config *)arg);
+
+	case DMA_TERMINATE_ALL:
+		bcm2835_dma_terminate_all(c);
+		break;
+
+	default:
+		ret = -ENXIO;
+		break;
+	}
+
+	return ret;
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+	struct bcm2835_chan *c;
+
+	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	c->vc.desc_free = bcm2835_dma_desc_free;
+	vchan_init(&c->vc, &d->ddev);
+	INIT_LIST_HEAD(&c->node);
+
+	d->ddev.chancnt++;
+
+	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+	c->ch = chan_id;
+	c->irq_number = irq;
+
+	return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+	while (!list_empty(&od->ddev.channels)) {
+		struct bcm2835_chan *c = list_first_entry(&od->ddev.channels,
+			struct bcm2835_chan, vc.chan.device_node);
+
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id bcm2835_dma_of_match[] = {
+	{ .compatible = "brcm,bcm2835-dma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+#endif
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+					   struct of_dma *ofdma)
+{
+	struct bcm2835_dmadev *d = ofdma->of_dma_data;
+	struct dma_chan *chan, *candidate;
+
+retry:
+	candidate = NULL;
+
+	/* Walk the list of channels registered with the current instance and
+	 * find one that is currently unused */
+	list_for_each_entry(chan, &d->ddev.channels, device_node)
+		if (chan->client_count == 0) {
+			candidate = chan;
+			break;
+		}
+
+	if (!candidate)
+		return NULL;
+
+	/* dma_get_slave_channel will return NULL if we lost a race between
+	 * the lookup and the reservation */
+	chan = dma_get_slave_channel(candidate);
+
+	if (chan) {
+		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+		/* Set DREQ from param */
+		c->dreq = spec->args[0];
+
+		return chan;
+	}
+
+	goto retry;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+	struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = false;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od;
+	struct resource *res;
+	void __iomem *base;
+	int rc;
+	int i;
+	int irq;
+	uint32_t chans_available;
+
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	pdev->dev.dma_parms = &od->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	od->base = base;
+
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+	od->ddev.device_tx_status = bcm2835_dma_tx_status;
+	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&od->ddev.channels);
+	spin_lock_init(&od->lock);
+
+	platform_set_drvdata(pdev, od);
+
+	if (pdev->dev.of_node) {
+		/* Request DMA channel mask from device tree */
+		if (of_property_read_u32(pdev->dev.of_node,
+				"brcm,dma-channel-mask",
+				&chans_available)) {
+			dev_err(&pdev->dev, "Failed to get channel mask\n");
+			bcm2835_dma_free(od);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(&pdev->dev, "Failed to get channel mask. No device tree.\n");
+		bcm2835_dma_free(od);
+		return -EINVAL;
+	}
+
+	/* Do not use the FIQ and BULK channels */
+	chans_available &= ~0xD;
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (chans_available & (1 << i)) {
+			rc = bcm2835_dma_chan_init(od, i, irq);
+			if (rc) {
+				bcm2835_dma_free(od);
+				return rc;
+			}
+		}
+	}
+
+	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+	if (pdev->dev.of_node) {
+		/* Device-tree DMA controller registration */
+		rc = of_dma_controller_register(pdev->dev.of_node,
+				bcm2835_dma_xlate, od);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to register DMA controller\n");
+			bcm2835_dma_free(od);
+			return rc;
+		}
+	}
+
+	rc = dma_async_device_register(&od->ddev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Failed to register slave DMA engine device: %d\n", rc);
+		bcm2835_dma_free(od);
+		return rc;
+	}
+
+	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+	return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&od->ddev);
+	bcm2835_dma_free(od);
+
+	return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+	.probe	= bcm2835_dma_probe,
+	.remove	= bcm2835_dma_remove,
+	.driver = {
+		.name = "bcm2835-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+	},
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
-- 
1.7.9.5


* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-17 15:39 [PATCHv7] dmaengine: Add support for BCM2835 Florian Meier
@ 2013-11-17 16:02 ` Joe Perches
  2013-11-17 16:37   ` Florian Meier
  2013-11-18 10:00 ` [PATCHv7] dmaengine: Add support for BCM2835 Shevchenko, Andriy
  2013-11-18 14:41 ` Mark Rutland
  2 siblings, 1 reply; 13+ messages in thread
From: Joe Perches @ 2013-11-17 16:02 UTC (permalink / raw)
  To: Florian Meier
  Cc: Stephen Warren, Vinod Koul, Dan Williams,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On Sun, 2013-11-17 at 16:39 +0100, Florian Meier wrote:
> Add support for DMA controller of BCM2835 as used in the Raspberry Pi.
> Currently it only supports cyclic DMA.
[]
> diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
[]
> +static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
> +	unsigned long arg)
> +{
> +	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
> +	int ret;
> +
> +	switch (cmd) {
> +	case DMA_SLAVE_CONFIG:
> +		return bcm2835_dma_slave_config(c,
> +				(struct dma_slave_config *)arg);
> +
> +	case DMA_TERMINATE_ALL:
> +		bcm2835_dma_terminate_all(c);
> +		break;
> +
> +	default:
> +		ret = -ENXIO;
> +		break;
> +	}
> +
> +	return ret;
> +}

case DMA_TERMINATE_ALL returns an uninitialized ret;

[]

> +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
> +					   struct of_dma *ofdma)
> +{
> +	struct bcm2835_dmadev *d = ofdma->of_dma_data;
> +	struct dma_chan *chan, *candidate;
> +
> +retry:
> +	candidate = NULL;
> +
> +	/* Walk the list of channels registered with the current instance and
> +	 * find one that is currently unused */
> +	list_for_each_entry(chan, &d->ddev.channels, device_node)
> +		if (chan->client_count == 0) {
> +			candidate = chan;
> +			break;
> +		}
> +
> +	if (!candidate)
> +		return NULL;
> +
> +	/* dma_get_slave_channel will return NULL if we lost a race between
> +	 * the lookup and the reservation */
> +	chan = dma_get_slave_channel(candidate);

Can that race happen consistently?
Does this avoid being a tight loop?

> +	if (chan) {
> +		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
> +
> +		/* Set DREQ from param */
> +		c->dreq = spec->args[0];
> +
> +		return chan;
> +	}
> +
> +	goto retry;
> +}

Also, I think this would be better as:

	if (!chan)
		goto retry;

	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}



* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-17 16:02 ` Joe Perches
@ 2013-11-17 16:37   ` Florian Meier
  2013-11-17 20:12     ` [PATCH] mmp_pdma: Style neatening Joe Perches
  0 siblings, 1 reply; 13+ messages in thread
From: Florian Meier @ 2013-11-17 16:37 UTC (permalink / raw)
  To: Joe Perches
  Cc: Stephen Warren, Vinod Koul, Dan Williams,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On 17.11.2013 17:02, Joe Perches wrote:
> On Sun, 2013-11-17 at 16:39 +0100, Florian Meier wrote:
>> Add support for DMA controller of BCM2835 as used in the Raspberry Pi.
>> Currently it only supports cyclic DMA.
> []
>> diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
> []
>> +static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
>> +	unsigned long arg)
>> +{
>> +	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
>> +	int ret;
>> +
>> +	switch (cmd) {
>> +	case DMA_SLAVE_CONFIG:
>> +		return bcm2835_dma_slave_config(c,
>> +				(struct dma_slave_config *)arg);
>> +
>> +	case DMA_TERMINATE_ALL:
>> +		bcm2835_dma_terminate_all(c);
>> +		break;
>> +
>> +	default:
>> +		ret = -ENXIO;
>> +		break;
>> +	}
>> +
>> +	return ret;
>> +}
> 
> case DMA_TERMINATE_ALL returns an uninitialized ret;

Oh yes - stupid mistake. Thank you!
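
A minimal fix (just a sketch, not yet tested) would be to use the return
value there, so that ret is always initialized:

	case DMA_TERMINATE_ALL:
		ret = bcm2835_dma_terminate_all(c);
		break;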

> []
> 
>> +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
>> +					   struct of_dma *ofdma)
>> +{
>> +	struct bcm2835_dmadev *d = ofdma->of_dma_data;
>> +	struct dma_chan *chan, *candidate;
>> +
>> +retry:
>> +	candidate = NULL;
>> +
>> +	/* Walk the list of channels registered with the current instance and
>> +	 * find one that is currently unused */
>> +	list_for_each_entry(chan, &d->ddev.channels, device_node)
>> +		if (chan->client_count == 0) {
>> +			candidate = chan;
>> +			break;
>> +		}
>> +
>> +	if (!candidate)
>> +		return NULL;
>> +
>> +	/* dma_get_slave_channel will return NULL if we lost a race between
>> +	 * the lookup and the reservation */
>> +	chan = dma_get_slave_channel(candidate);
> 
> Can that race happen consistently?
> Does this avoid being a tight loop?

I would say this cannot happen.
If I understand it correctly, the conflicting process will not
get NULL (because it has won the race). In the worst case,
a new process enters the race, but this only continues until all
channels are in use. At that point no candidate exists and the loop
exits.

At least, the code is directly taken from mmp_pdma.c ;-)

Greetings,
Florian



* [PATCH] mmp_pdma: Style neatening
  2013-11-17 16:37   ` Florian Meier
@ 2013-11-17 20:12     ` Joe Perches
  2013-11-28  9:34       ` Vinod Koul
  0 siblings, 1 reply; 13+ messages in thread
From: Joe Perches @ 2013-11-17 20:12 UTC (permalink / raw)
  To: Florian Meier, Daniel Mack
  Cc: Vinod Koul, Dan Williams, Russell King - ARM Linux, devicetree,
	linux-kernel, dmaengine, linux-arm-kernel

Neaten code used as a template for other drivers.
Make the code more consistent with kernel styles.

o Convert #defines with (1<<foo) to BIT(foo)
o Alignment wrapping
o Logic inversions to put return at end of functions
o Convert devm_kzalloc with multiply to devm_kcalloc
o Fix "Peripheral" typo

Signed-off-by: Joe Perches <joe@perches.com>
---
> At least, the code is directly taken from mmp_pdma.c ;-)

Well, maybe the template code should be updated if there
are going to be more of these.
 
Uncompiled/untested.

 drivers/dma/mmp_pdma.c | 204 +++++++++++++++++++++++++------------------------
 1 file changed, 105 insertions(+), 99 deletions(-)

diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dcb1e05..c2658f6 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -32,38 +33,37 @@
 #define DTADR		0x0208
 #define DCMD		0x020c
 
-#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
-#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
-#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
-#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
-#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
-#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN	(1 << 28)       /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN	(1 << 27)       /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN	(1 << 26)       /* STOP on an EOR */
-#define DCSR_SETCMPST	(1 << 25)       /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST	(1 << 24)       /* Clear Descriptor Compare Status */
-#define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
-#define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
-
-#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
-				 (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
-#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
+#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
+#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
+#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
+#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
+#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
+#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
+#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
+#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
+#define DCSR_EORINTR	BIT(9)	/* The end of Receive */
+
+#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
+#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
 
 #define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
-#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
-
-#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
-#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
-#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
-#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
-#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
-#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
+#define DDADR_STOP	BIT(0)	/* Stop (read / write) */
+
+#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
+#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
+#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
+#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
+#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
+#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
 #define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
 #define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
 #define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
 	spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx)					\
+	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh)					\
+	container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan)					\
+	container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev)					\
+	container_of(dmadev, struct mmp_pdma_device, device)
 
 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 {
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
 	writel(dalgn, phy->base + DALGN);
 
 	reg = (phy->idx << 2) + DCSR;
-	writel(readl(phy->base + reg) | DCSR_RUN,
-					phy->base + reg);
+	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
 }
 
 static void disable_chan(struct mmp_pdma_phy *phy)
 {
 	u32 reg;
 
-	if (phy) {
-		reg = (phy->idx << 2) + DCSR;
-		writel(readl(phy->base + reg) & ~DCSR_RUN,
-						phy->base + reg);
-	}
+	if (!phy)
+		return;
+
+	reg = (phy->idx << 2) + DCSR;
+	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
 }
 
 static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
 	u32 dint = readl(phy->base + DINT);
 	u32 reg = (phy->idx << 2) + DCSR;
 
-	if (dint & BIT(phy->idx)) {
-		/* clear irq */
-		dcsr = readl(phy->base + reg);
-		writel(dcsr, phy->base + reg);
-		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
-			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
-		return 0;
-	}
-	return -EAGAIN;
+	if (!(dint & BIT(phy->idx)))
+		return -EAGAIN;
+
+	/* clear irq */
+	dcsr = readl(phy->base + reg);
+	writel(dcsr, phy->base + reg);
+	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+	return 0;
 }
 
 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
 {
 	struct mmp_pdma_phy *phy = dev_id;
 
-	if (clear_chan_irq(phy) == 0) {
-		tasklet_schedule(&phy->vchan->tasklet);
-		return IRQ_HANDLED;
-	} else
+	if (clear_chan_irq(phy) != 0)
 		return IRQ_NONE;
+
+	tasklet_schedule(&phy->vchan->tasklet);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
 
 	if (irq_num)
 		return IRQ_HANDLED;
-	else
-		return IRQ_NONE;
+
+	return IRQ_NONE;
 }
 
 /* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
 	 */
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
-	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
 		for (i = 0; i < pdev->dma_channels; i++) {
-			if (prio != ((i & 0xf) >> 2))
+			if (prio != (i & 0xf) >> 2)
 				continue;
 			phy = &pdev->phy[i];
 			if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
 	if (chan->desc_pool)
 		return 1;
 
-	chan->desc_pool =
-		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
-				  sizeof(struct mmp_pdma_desc_sw),
-				  __alignof__(struct mmp_pdma_desc_sw), 0);
+	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+					  chan->dev,
+					  sizeof(struct mmp_pdma_desc_sw),
+					  __alignof__(struct mmp_pdma_desc_sw),
+					  0);
 	if (!chan->desc_pool) {
 		dev_err(chan->dev, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
+
 	mmp_pdma_free_phy(chan);
 	chan->idle = true;
 	chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
 }
 
 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
-				  struct list_head *list)
+				    struct list_head *list)
 {
 	struct mmp_pdma_desc_sw *desc, *_desc;
 
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_memcpy(struct dma_chan *dchan,
-	dma_addr_t dma_dst, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+		     dma_addr_t dma_dst, dma_addr_t dma_src,
+		     size_t len, unsigned long flags)
 {
 	struct mmp_pdma_chan *chan;
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
-			 unsigned int sg_len, enum dma_transfer_direction dir,
-			 unsigned long flags, void *context)
+		       unsigned int sg_len, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
-	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
-	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+			 dma_addr_t buf_addr, size_t len, size_t period_len,
+			 enum dma_transfer_direction direction,
+			 unsigned long flags, void *context)
 {
 	struct mmp_pdma_chan *chan;
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
 			goto fail;
 		}
 
-		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
-					(DCMD_LENGTH & period_len);
+		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+				  (DCMD_LENGTH & period_len));
 		new->desc.dsadr = dma_src;
 		new->desc.dtadr = dma_dst;
 
@@ -677,12 +684,11 @@ fail:
 }
 
 static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+			    unsigned long arg)
 {
 	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
 	struct dma_slave_config *cfg = (void *)arg;
 	unsigned long flags;
-	int ret = 0;
 	u32 maxburst = 0, addr = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 		return -ENOSYS;
 	}
 
-	return ret;
+	return 0;
 }
 
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
-			dma_cookie_t cookie, struct dma_tx_state *txstate)
+					  dma_cookie_t cookie,
+					  struct dma_tx_state *txstate)
 {
 	return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
 	return 0;
 }
 
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
-							int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
 {
 	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
 	struct mmp_pdma_chan *chan;
 	int ret;
 
-	chan = devm_kzalloc(pdev->dev,
-			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+			    GFP_KERNEL);
 	if (chan == NULL)
 		return -ENOMEM;
 
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 	phy->base = pdev->base;
 
 	if (irq) {
-		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_chan_handler, 0, "pdma", phy);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+				       "pdma", phy);
 		if (ret) {
 			dev_err(pdev->dev, "channel request irq fail!\n");
 			return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 	INIT_LIST_HEAD(&chan->chain_running);
 
 	/* register virt channel to dma engine */
-	list_add_tail(&chan->chan.device_node,
-			&pdev->device.channels);
+	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
 
 	return 0;
 }
@@ -913,13 +918,12 @@ retry:
 	 * the lookup and the reservation */
 	chan = dma_get_slave_channel(candidate);
 
-	if (chan) {
-		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-		c->drcmr = dma_spec->args[0];
-		return chan;
-	}
+	if (!chan)
+		goto retry;
 
-	goto retry;
+	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
+
+	return chan;
 }
 
 static int mmp_pdma_probe(struct platform_device *op)
@@ -934,6 +938,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
 	if (!pdev)
 		return -ENOMEM;
+
 	pdev->dev = &op->dev;
 
 	spin_lock_init(&pdev->phy_lock);
@@ -945,8 +950,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 
 	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
 	if (of_id)
-		of_property_read_u32(pdev->dev->of_node,
-				"#dma-channels", &dma_channels);
+		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+				     &dma_channels);
 	else if (pdata && pdata->dma_channels)
 		dma_channels = pdata->dma_channels;
 	else
@@ -958,8 +963,9 @@ static int mmp_pdma_probe(struct platform_device *op)
 			irq_num++;
 	}
 
-	pdev->phy = devm_kzalloc(pdev->dev,
-		dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	pdev->phy = devm_kcalloc(pdev->dev,
+				 dma_channels, sizeof(struct mmp_pdma_chan),
+				 GFP_KERNEL);
 	if (pdev->phy == NULL)
 		return -ENOMEM;
 
@@ -968,8 +974,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 	if (irq_num != dma_channels) {
 		/* all chan share one irq, demux inside */
 		irq = platform_get_irq(op, 0);
-		ret = devm_request_irq(pdev->dev, irq,
-			mmp_pdma_int_handler, 0, "pdma", pdev);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+				       "pdma", pdev);
 		if (ret)
 			return ret;
 	}
@@ -1044,7 +1050,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
 	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
 		return false;
 
-	c->drcmr = *(unsigned int *) param;
+	c->drcmr = *(unsigned int *)param;
 
 	return true;
 }
@@ -1052,6 +1058,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
 
 module_platform_driver(mmp_pdma_driver);
 
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_LICENSE("GPL v2");





* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-17 15:39 [PATCHv7] dmaengine: Add support for BCM2835 Florian Meier
  2013-11-17 16:02 ` Joe Perches
@ 2013-11-18 10:00 ` Shevchenko, Andriy
  2013-11-18 12:16   ` Florian Meier
  2013-11-18 14:41 ` Mark Rutland
  2 siblings, 1 reply; 13+ messages in thread
From: Shevchenko, Andriy @ 2013-11-18 10:00 UTC (permalink / raw)
  To: Florian Meier
  Cc: Stephen Warren, Koul, Vinod, Williams, Dan J,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel


On Sun, 2013-11-17 at 16:39 +0100, Florian Meier wrote:
> Add support for DMA controller of BCM2835 as used in the Raspberry Pi.
> Currently it only supports cyclic DMA.

Few comments below.

> +++ b/drivers/dma/bcm2835-dma.c
> @@ -0,0 +1,736 @@

> +static int bcm2835_dma_abort(void __iomem *chan_base)
> +{
> +	unsigned long int cs;
> +	long int timeout = 10000;
> +
> +	cs = readl(chan_base + BCM2835_DMA_CS);
> +	if (!(cs & BCM2835_DMA_ACTIVE))
> +		return 0;
> +
> +	/* Write 0 to the active bit - Pause the DMA */
> +	writel(0, chan_base + BCM2835_DMA_CS);
> +
> +	/* Wait for any current AXI transfer to complete */
> +	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout >= 0)
> +		cs = readl(chan_base + BCM2835_DMA_CS);

I actually don't see a timeout here; 'timeout' is really an iteration
counter in your case.
Might be better to have something like

while (readl(...) & BCM2835_DMA_ISPAUSED && --timeout)
cpu_relax();

?
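
Spelled out against the patch (sketch only, not tested, names taken
from the code above), that would be something like:

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Wait for any current AXI transfer to complete */
	while ((readl(chan_base + BCM2835_DMA_CS) & BCM2835_DMA_ISPAUSED) &&
	       --timeout)
		cpu_relax();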

> +	/* We'll un-pause when we set of our next DMA */
> +	if (cs & BCM2835_DMA_ISPAUSED)

if (!timeout)

> +		return -ETIMEDOUT;


> +
> +	if (!(cs & BCM2835_DMA_ACTIVE))
> +		return 0;

Duplicate code. Perhaps
static inline bool is_chan_not_active(unsigned long cs)
{
return !(cs & BCM2835_DMA_ACTIVE);
}

[]

> +static irqreturn_t bcm2835_dma_callback(int irq, void *data)
> +{

[]

> +	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);

Since it's duplicate code, perhaps

static inline void set_chan_active(void __iomem *base)
{
writel(BCM2835_DMA_ACTIVE, base + BCM2835_DMA_CS);
}

[]

> +static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
> +{
> +	unsigned i;

In some cases you use 'unsigned long int' instead of 'unsigned long', for
example, but here 'unsigned' instead of 'unsigned int'. Please pick one
style and use it consistently.

[]

> +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
> +	dma_cookie_t cookie, struct dma_tx_state *txstate)
> +{

[]

> +	} else {
> +		txstate->residue = 0;

Useless assignment since dmaengine will do this for you in
dma_cookie_status.

[]

> +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
> +	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
> +	size_t period_len, enum dma_transfer_direction direction,
> +	unsigned long flags, void *context)
> +{

[]

> +	/* Grab configuration */
> +	if (direction == DMA_DEV_TO_MEM) {
> +		dev_addr = c->cfg.src_addr;
> +		dev_width = c->cfg.src_addr_width;
> +		sync_type = BCM2835_DMA_S_DREQ;
> +	} else if (direction == DMA_MEM_TO_DEV) {
> +		dev_addr = c->cfg.dst_addr;
> +		dev_width = c->cfg.dst_addr_width;
> +		sync_type = BCM2835_DMA_D_DREQ;
> +	} else {
> +		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
> +		return NULL;
> +	}

You might use following as well

if (!is_slave_direction) {
 dev_err(...);
 return NULL;
}

if (direction == DMA_DEV_TO_MEM)
{
...
} else {
...
}

?

At least it will be aligned with what you have further in this function.
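
Filled in with the names from the patch (sketch only, not compiled),
that would be roughly:

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Grab configuration */
	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		sync_type = BCM2835_DMA_S_DREQ;
	} else {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		sync_type = BCM2835_DMA_D_DREQ;
	}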

> +	/* Bus width translates to the element size (ES) */
> +	switch (dev_width) {
> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> +		es = BCM2835_DMA_DATA_TYPE_S32;
> +		break;
> +	default:
> +		return NULL;
> +	}

So, you use a switch-case in the hope of extending it later, correct?

[]

> +static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
> +{
> +	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
> +	unsigned long flags;
> +	int timeout = 1000;
> +	LIST_HEAD(head);

[]

> +	if (c->desc) {
> +		c->desc = NULL;
> +		bcm2835_dma_abort(c->chan_base);
> +
> +		/* Wait for stopping */
> +		while (timeout > 0) {
> +			timeout--;

while (--timeout)

> +			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
> +						BCM2835_DMA_ACTIVE))
> +				break;
> +
> +			cpu_relax();
> +		}
> +
> +		if (timeout <= 0)

if (!timeout)

[]

> +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
> +					   struct of_dma *ofdma)
> +{
> +	struct bcm2835_dmadev *d = ofdma->of_dma_data;
> +	struct dma_chan *chan, *candidate;
> +
> +retry:
> +	candidate = NULL;
> +
> +	/* Walk the list of channels registered with the current instance and
> +	 * find one that is currently unused */
> +	list_for_each_entry(chan, &d->ddev.channels, device_node)
> +		if (chan->client_count == 0) {
> +			candidate = chan;
> +			break;
> +		}
> +
> +	if (!candidate)
> +		return NULL;
> +
> +	/* dma_get_slave_channel will return NULL if we lost a race between
> +	 * the lookup and the reservation */
> +	chan = dma_get_slave_channel(candidate);
> +
> +	if (chan) {

Perhaps

if (!chan)
goto retry;


> +		struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
> +
> +		/* Set DREQ from param */
> +		c->dreq = spec->args[0];
> +
> +		return chan;
> +	}
> +
> +	goto retry;
> +}

> +static int bcm2835_dma_probe(struct platform_device *pdev)
> +{
> +	struct bcm2835_dmadev *od;
> +	struct resource *res;
> +	void __iomem *base;
> +	int rc;
> +	int i;
> +	int irq;
> +	uint32_t chans_available;

Why uint32_t?

[]

> +	if (pdev->dev.of_node) {

Perhaps

if (!...of_node) {
 ...
 return -EINVAL;
}

> +		/* Request DMA channel mask from device tree */
> +		if (of_property_read_u32(pdev->dev.of_node,
> +				"brcm,dma-channel-mask",
> +				&chans_available)) {
> +			dev_err(&pdev->dev, "Failed to get channel mask\n");
> +			bcm2835_dma_free(od);
> +			return -EINVAL;
> +		}
> +	} else {
> +		dev_err(&pdev->dev, "Failed to get channel mask. No device tree.\n");
> +		bcm2835_dma_free(od);
> +		return -EINVAL;
> +	}

> +	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
> +
> +	if (pdev->dev.of_node) {

Does it make sense?

> +		/* Device-tree DMA controller registration */
> +		rc = of_dma_controller_register(pdev->dev.of_node,
> +				bcm2835_dma_xlate, od);
> +		if (rc) {
> +			dev_err(&pdev->dev, "Failed to register DMA controller\n");
> +			bcm2835_dma_free(od);
> +			return rc;

goto err_no_dma;

> +		}
> +	}
> +
> +	rc = dma_async_device_register(&od->ddev);
> +	if (rc) {
> +		dev_err(&pdev->dev,
> +			"Failed to register slave DMA engine device: %d\n", rc);
> +		bcm2835_dma_free(od);
> +		return rc;

goto err_no_dma;

> +	}
> +
> +	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");


> +	return rc;

return 0;

err_no_dma:
bcm2835_dma_free(od);
return rc;

?
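
I.e. the tail of probe() would then end up roughly like this (sketch
only, not compiled):

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;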


-- 
Andy Shevchenko <andriy.shevchenko@intel.com>
Intel Finland Oy


* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 10:00 ` [PATCHv7] dmaengine: Add support for BCM2835 Shevchenko, Andriy
@ 2013-11-18 12:16   ` Florian Meier
  2013-11-18 14:30     ` Andy Shevchenko
  0 siblings, 1 reply; 13+ messages in thread
From: Florian Meier @ 2013-11-18 12:16 UTC (permalink / raw)
  To: Shevchenko, Andriy
  Cc: Stephen Warren, Koul, Vinod, Williams, Dan J,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

Thank you! Few comments below.

> []
>> +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
>> +	dma_cookie_t cookie, struct dma_tx_state *txstate)
>> +{
> 
> []
> 
>> +	} else {
>> +		txstate->residue = 0;
> 
> Useless assignment since dmaengine will do this for you in
> dma_cookie_status.

I agree that it is useless, but I think otherwise it might be concealed
that there is a third case left that uses a residue of 0. Do you think a
comment is better? E.g.:

+	} else {
+		/* residue = 0 per default */

>> +	/* Bus width translates to the element size (ES) */
>> +	switch (dev_width) {
>> +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
>> +		es = BCM2835_DMA_DATA_TYPE_S32;
>> +		break;
>> +	default:
>> +		return NULL;
>> +	}
> 
> So, you use a switch-case in the hope of extending it later, correct?

Yes, there is a S128 case left, but that is not implemented yet.

>> +static int bcm2835_dma_probe(struct platform_device *pdev)
>> +{
>> +	struct bcm2835_dmadev *od;
>> +	struct resource *res;
>> +	void __iomem *base;
>> +	int rc;
>> +	int i;
>> +	int irq;
>> +	uint32_t chans_available;
> 
> Why uint32_t?

Because it is a bit mask of fixed length that directly comes from the
firmware.

>> +		/* Request DMA channel mask from device tree */
>> +		if (of_property_read_u32(pdev->dev.of_node,
>> +				"brcm,dma-channel-mask",
>> +				&chans_available)) {
>> +			dev_err(&pdev->dev, "Failed to get channel mask\n");
>> +			bcm2835_dma_free(od);
>> +			return -EINVAL;
>> +		}
>> +	} else {
>> +		dev_err(&pdev->dev, "Failed to get channel mask. No device tree.\n");
>> +		bcm2835_dma_free(od);
>> +		return -EINVAL;
>> +	}
> 
>> +	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
>> +
>> +	if (pdev->dev.of_node) {
> 
> Does it make sense?

There was already a discussion about that in PATCHv4. It should be
possible to add board file initialization later with little patching.
Although, maybe this will not be relevant anymore, because device tree
support for this platform keeps getting better.


* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 12:16   ` Florian Meier
@ 2013-11-18 14:30     ` Andy Shevchenko
  2013-11-18 14:37       ` Florian Meier
  2013-11-18 14:54       ` Russell King - ARM Linux
  0 siblings, 2 replies; 13+ messages in thread
From: Andy Shevchenko @ 2013-11-18 14:30 UTC (permalink / raw)
  To: Florian Meier
  Cc: Stephen Warren, Koul, Vinod, Williams, Dan J,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On Mon, 2013-11-18 at 13:16 +0100, Florian Meier wrote:
> Thank you! Few comments below.

See my answers below.

> >> +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
> >> +	dma_cookie_t cookie, struct dma_tx_state *txstate)
> >> +{
> > 
> > []
> > 
> >> +	} else {
> >> +		txstate->residue = 0;
> > 
> > Useless assignment since dmaengine will do this for you in
> > dma_cookie_status.
> 
> I agree that it is useless, but I think otherwise it might be concealed
> that there is a third case left that uses a residue of 0. Do you think a
> comment is better? E.g.:
> 
> +	} else {
> +		/* residue = 0 per default */

I think that, as in many other DMA drivers, you should either have a
separate function to get the residue, which returns 0, or just not
include this case.

> >> +static int bcm2835_dma_probe(struct platform_device *pdev)
> >> +{

> >> +	uint32_t chans_available;
> > 
> > Why uint32_t?
> 
> Because it is a bit mask of fixed length that directly comes from the
> firmware.

As someone already told you for your i2s patch, please change that to
the corresponding u* type, namely u32.

-- 
Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Intel Finland Oy



* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 14:30     ` Andy Shevchenko
@ 2013-11-18 14:37       ` Florian Meier
  2013-11-18 14:54       ` Russell King - ARM Linux
  1 sibling, 0 replies; 13+ messages in thread
From: Florian Meier @ 2013-11-18 14:37 UTC (permalink / raw)
  To: Andy Shevchenko
  Cc: Stephen Warren, Koul, Vinod, Williams, Dan J,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

>>>> +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
>>>> +	dma_cookie_t cookie, struct dma_tx_state *txstate)
>>>> +{
>>>
>>> []
>>>
>>>> +	} else {
>>>> +		txstate->residue = 0;
>>>
>>> Useless assignment since dmaengine will do this for you in
>>> dma_cookie_status.
>>
>> I agree that it is useless, but I think otherwise it might be concealed
>> that there is a third case left that uses a residue of 0. Do you think a
>> comment is better? E.g.:
>>
>> +	} else {
>> +		/* residue = 0 per default */
> 
> I think that, like in many other DMA drivers, you should either have a
> separate function to get the residue which returns 0, or just not
> include this case.

You mean like in omap-dma.c? ;-P

>>>> +static int bcm2835_dma_probe(struct platform_device *pdev)
>>>> +{
> 
>>>> +	uint32_t chans_available;
>>>
>>> Why uint32_t?
>>
>> Because it is a bit mask of fixed length that directly comes from the
>> firmware.
> 
> As you were already told for your i2s patch, please change that to the
> corresponding u* type, namely u32.

I have no problem with changing that, but why?


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-17 15:39 [PATCHv7] dmaengine: Add support for BCM2835 Florian Meier
  2013-11-17 16:02 ` Joe Perches
  2013-11-18 10:00 ` [PATCHv7] dmaengine: Add support for BCM2835 Shevchenko, Andriy
@ 2013-11-18 14:41 ` Mark Rutland
  2 siblings, 0 replies; 13+ messages in thread
From: Mark Rutland @ 2013-11-18 14:41 UTC (permalink / raw)
  To: Florian Meier
  Cc: Stephen Warren, Vinod Koul, Dan Williams,
	Russell King - ARM Linux, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On Sun, Nov 17, 2013 at 03:39:19PM +0000, Florian Meier wrote:
> Add support for DMA controller of BCM2835 as used in the Raspberry Pi.
> Currently it only supports cyclic DMA.
> 
> Signed-off-by: Florian Meier <florian.meier@koalo.de>
> ---
> 
> This version includes some more style improvements
> suggested in the previous thread.
> 
>  .../devicetree/bindings/dma/bcm2835-dma.txt        |   56 ++
>  drivers/dma/Kconfig                                |    6 +
>  drivers/dma/Makefile                               |    1 +
>  drivers/dma/bcm2835-dma.c                          |  736 ++++++++++++++++++++
>  4 files changed, 799 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/bcm2835-dma.txt
>  create mode 100644 drivers/dma/bcm2835-dma.c
> 
> diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
> new file mode 100644
> index 0000000..7d91019
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
> @@ -0,0 +1,56 @@
> +* BCM2835 DMA controller
> +
> +Required properties:
> +- compatible: Should be "brcm,bcm2835-dma".
> +- reg: Should contain DMA registers location and length.
> +- interrupts: Should contain the DMA interrupts associated
> +               to the DMA channels in ascending order.
> +               First cell is the IRQ bank.
> +               Second cell is the IRQ number.

The format of the cells is a property of the interrupt parent, not of
the DMA controller. It shouldn't be described here.

> +- #dma-cells: Must be <1>, used to represent the number of integer cells in
> +               the dmas property of client devices.

A brief description of the set of sane values of the dma-specifier cell
would be better.

How many channels does the DMA controller have?

> +- brcm,dma-channel-mask: Bit mask representing the channels
> +                        not used by the firmware.

Which bits correspond to which channels?

How many channels are likely to be reserved out of how many in total?

Are they likely to be an arbitrary set, or some contiguous range?

> +
> +Example:
> +
> +dma: dma@7e007000 {
> +       compatible = "brcm,bcm2835-dma";
> +       reg = <0x7e007000 0xf00>;
> +       interrupts = <1 16
> +                     1 17
> +                     1 18
> +                     1 19
> +                     1 20
> +                     1 21
> +                     1 22
> +                     1 23
> +                     1 24
> +                     1 25
> +                     1 26
> +                     1 27
> +                     1 28>;

Please bracket these individually.

> +
> +       #dma-cells = <1>;
> +       brcm,dma-channel-mask = <0x7f35>;
> +};
> +
> +DMA clients connected to the BCM2835 DMA controller must use the format
> +described in the dma.txt file, using a two-cell specifier for each channel:
> +a phandle plus one integer cells.
> +The two cells in order are:
> +
> +1. A phandle pointing to the DMA controller.
> +2. The DREQ number.

This description is unnecessary, and technically wrong (the phandle
isn't part of the specifier, as the specifier goes with the phandle).

> +
> +Example:
> +
> +bcm2835_i2s: i2s@7e203000 {
> +       compatible = "brcm,bcm2835-i2s";
> +       reg = < 0x7e203000 0x20
> +               0x7e101098 0x02>;
> +
> +       dmas = <&dma 2
> +               &dma 3>;

Brackets please.

[...]

> +struct bcm2835_dma_cb {
> +       uint32_t info;
> +       uint32_t src;
> +       uint32_t dst;
> +       uint32_t length;
> +       uint32_t stride;
> +       uint32_t next;
> +       uint32_t pad[2];

s/uint32_t/u32/ here and elsewhere.

[...]

> +static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
> +{
> +       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
> +       struct bcm2835_desc *d;
> +
> +       if (!vd) {
> +               c->desc = NULL;
> +               return;
> +       }
> +
> +       list_del(&vd->node);
> +
> +       c->desc = d = to_bcm2835_dma_desc(&vd->tx);
> +
> +       dsb();  /* ARM data synchronization (push) operation */

We all know what a dsb is. What you should explain is _why_ the dsb is
here. As this sits under drivers/, it would be nicer to use an
architecture-generic barrier rather than dsb() directly.
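
One possibility (an untested sketch; wmb() is the architecture-generic
write barrier, and depending on the config it may already be implied by
writel() on ARM):

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	/*
	 * Make sure the control block is visible in memory before the
	 * DMA engine is pointed at it and started.
	 */
	wmb();

	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
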

> +
> +       writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
> +       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
> +}
> +
> +static irqreturn_t bcm2835_dma_callback(int irq, void *data)
> +{
> +       struct bcm2835_chan *c = data;
> +       struct bcm2835_desc *d;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&c->vc.lock, flags);
> +
> +       /* Acknowledge interrupt */
> +       writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
> +
> +       d = c->desc;
> +
> +       if (d) {
> +               /* TODO Only works for cyclic DMA */
> +               vchan_cyclic_callback(&d->vd);
> +       }
> +
> +       /* Keep the DMA engine running */
> +       dsb(); /* ARM synchronization barrier */

Better explanation please.

> +       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
> +
> +       spin_unlock_irqrestore(&c->vc.lock, flags);
> +
> +       return IRQ_HANDLED;
> +}
> +
> +static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
> +
> +       dev_dbg(c->vc.chan.device->dev,
> +                       "Allocating DMA channel %i\n", c->ch);

Why not %d? It's far more common...


[...]

> +       if (pdev->dev.of_node) {
> +               /* Request DMA channel mask from device tree */
> +               if (of_property_read_u32(pdev->dev.of_node,
> +                               "brcm,dma-channel-mask",
> +                               &chans_available)) {
> +                       dev_err(&pdev->dev, "Failed to get channel mask\n");
> +                       bcm2835_dma_free(od);
> +                       return -EINVAL;
> +               }

As of_property_read_u32 has an implicit check on np, you don't need to
first check pdev->dev.of_node.
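
i.e. the whole block could reduce to something like (untested, error
message wording kept from the patch):

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		bcm2835_dma_free(od);
		return -EINVAL;
	}
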

> +       } else {
> +               dev_err(&pdev->dev, "Failed to get channel mask. No device tree.\n");
> +               bcm2835_dma_free(od);
> +               return -EINVAL;
> +       }
> +
> +       /* Do not use the FIQ and BULK channels */
> +       chans_available &= ~0xD;

A couple of #defines would be nice, along with an explanation as to
why...
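
Something along these lines, perhaps (the names and the reason given in
the comment are only guesses, not taken from the patch):

	/*
	 * Channel 0 (the "bulk" channel) and channels 2/3 (the FIQ
	 * channels) are left alone so that other users, e.g. the
	 * downstream USB FIQ code, can keep using them.
	 */
	#define BCM2835_DMA_BULK_MASK	BIT(0)
	#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))

	chans_available &= ~(BCM2835_DMA_BULK_MASK | BCM2835_DMA_FIQ_MASK);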

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 14:30     ` Andy Shevchenko
  2013-11-18 14:37       ` Florian Meier
@ 2013-11-18 14:54       ` Russell King - ARM Linux
  2013-11-18 15:04         ` Mark Rutland
  2013-11-18 22:18         ` Joe Perches
  1 sibling, 2 replies; 13+ messages in thread
From: Russell King - ARM Linux @ 2013-11-18 14:54 UTC (permalink / raw)
  To: Andy Shevchenko
  Cc: Florian Meier, Stephen Warren, Koul, Vinod, Williams, Dan J,
	devicetree, alsa-devel, Liam Girdwood, linux-kernel, Mark Brown,
	linux-rpi-kernel, dmaengine, linux-arm-kernel

On Mon, Nov 18, 2013 at 04:30:17PM +0200, Andy Shevchenko wrote:
> On Mon, 2013-11-18 at 13:16 +0100, Florian Meier wrote:
> > >> +	uint32_t chans_available;
> > > 
> > > Why uint32_t?
> > 
> > Because it is a bit mask of fixed length that directly comes from the
> > firmware.
> 
> As you were already told for your i2s patch, please change that to the
> corresponding u* type, namely u32.

There's no problem with uint32_t vs u32 - either will do.  u32 is the
pre-stdint.h Linux definition of a 32-bit unsigned integer.  There's
no reason why uint32_t isn't perfectly acceptable.  It's a matter of
author taste which gets used.  (Except where modifications are to an
existing chunk of code using one or the other - where consistency then
matters more.)

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 14:54       ` Russell King - ARM Linux
@ 2013-11-18 15:04         ` Mark Rutland
  2013-11-18 22:18         ` Joe Perches
  1 sibling, 0 replies; 13+ messages in thread
From: Mark Rutland @ 2013-11-18 15:04 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Andy Shevchenko, Florian Meier, Stephen Warren, Koul, Vinod,
	Williams, Dan J, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On Mon, Nov 18, 2013 at 02:54:00PM +0000, Russell King - ARM Linux wrote:
> On Mon, Nov 18, 2013 at 04:30:17PM +0200, Andy Shevchenko wrote:
> > On Mon, 2013-11-18 at 13:16 +0100, Florian Meier wrote:
> > > >> +	uint32_t chans_available;
> > > > 
> > > > Why uint32_t?
> > > 
> > > Because it is a bit mask of fixed length that directly comes from the
> > > firmware.
> > 
> > As you were already told for your i2s patch, please change that to the
> > corresponding u* type, namely u32.
> 
> There's no problem with uint32_t vs u32 - either will do.  u32 is the
> pre-stdint.h Linux definition of a 32-bit unsigned integer.  There's
> no reason why uint32_t isn't perfectly acceptable.  It's a matter of
> author taste which gets used.  (Except where modifications are to an
> existing chunk of code using one or the other - where consistency then
> matters more.)

Sorry for being a source of confusion here. I won't push this point in
future.

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHv7] dmaengine: Add support for BCM2835
  2013-11-18 14:54       ` Russell King - ARM Linux
  2013-11-18 15:04         ` Mark Rutland
@ 2013-11-18 22:18         ` Joe Perches
  1 sibling, 0 replies; 13+ messages in thread
From: Joe Perches @ 2013-11-18 22:18 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Andy Shevchenko, Florian Meier, Stephen Warren, Koul, Vinod,
	Williams, Dan J, devicetree, alsa-devel, Liam Girdwood,
	linux-kernel, Mark Brown, linux-rpi-kernel, dmaengine,
	linux-arm-kernel

On Mon, 2013-11-18 at 14:54 +0000, Russell King - ARM Linux wrote:
> On Mon, Nov 18, 2013 at 04:30:17PM +0200, Andy Shevchenko wrote:
> > On Mon, 2013-11-18 at 13:16 +0100, Florian Meier wrote:
> > > >> +	uint32_t chans_available;
> > > > 
> > > > Why uint32_t?
> > > 
> > > Because it is a bit mask of fixed length that directly comes from the
> > > firmware.
> > 
> > As you were already told for your i2s patch, please change that to the
> > corresponding u* type, namely u32.
> 
> There's no problem with uint32_t vs u32 - either will do.  u32 is the
> pre-stdint.h Linux definition of a 32-bit unsigned integer.  There's
> no reason why uint32_t isn't perfectly acceptable.  It's a matter of
> author taste which gets used.  (Except where modifications are to an
> existing chunk of code using one or the other - where consistency then
> matters more.)

https://lkml.org/lkml/2006/5/2/258



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] mmp_pdma: Style neatening
  2013-11-17 20:12     ` [PATCH] mmp_pdma: Style neatening Joe Perches
@ 2013-11-28  9:34       ` Vinod Koul
  0 siblings, 0 replies; 13+ messages in thread
From: Vinod Koul @ 2013-11-28  9:34 UTC (permalink / raw)
  To: Joe Perches
  Cc: Florian Meier, Daniel Mack, Dan Williams,
	Russell King - ARM Linux, devicetree, linux-kernel, dmaengine,
	linux-arm-kernel

On Sun, Nov 17, 2013 at 12:12:56PM -0800, Joe Perches wrote:
> Neaten code used as a template for other drivers.
> Make the code more consistent with kernel styles.
> 
> o Convert #defines with (1<<foo) to BIT(foo)
> o Alignment wrapping
> o Logic inversions to put return at end of functions
> o Convert devm_kzalloc with multiply to devm_kcalloc
> o typo of Peripheral fix
> 
> Signed-off-by: Joe Perches <joe@perches.com>
> ---
> > At least, the code is directly taken from mmp_pdma.c ;-)
> 
> Well, maybe the template code should be updated if there
> are going to be more of these.
>  
> Uncompiled/untested.
Compile tested and applied.

BUT you should not have hijacked the thread; this patch should have been
sent as a separate thread!

--
~Vinod
> 
>  drivers/dma/mmp_pdma.c | 204 +++++++++++++++++++++++++------------------------
>  1 file changed, 105 insertions(+), 99 deletions(-)
> 
> diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
> index dcb1e05..c2658f6 100644
> --- a/drivers/dma/mmp_pdma.c
> +++ b/drivers/dma/mmp_pdma.c
> @@ -5,6 +5,7 @@
>   * it under the terms of the GNU General Public License version 2 as
>   * published by the Free Software Foundation.
>   */
> +
>  #include <linux/err.h>
>  #include <linux/module.h>
>  #include <linux/init.h>
> @@ -32,38 +33,37 @@
>  #define DTADR		0x0208
>  #define DCMD		0x020c
>  
> -#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
> -#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
> -#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
> -#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
> -#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
> -#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
> -#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
> -#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */
> -
> -#define DCSR_EORIRQEN	(1 << 28)       /* End of Receive Interrupt Enable (R/W) */
> -#define DCSR_EORJMPEN	(1 << 27)       /* Jump to next descriptor on EOR */
> -#define DCSR_EORSTOPEN	(1 << 26)       /* STOP on an EOR */
> -#define DCSR_SETCMPST	(1 << 25)       /* Set Descriptor Compare Status */
> -#define DCSR_CLRCMPST	(1 << 24)       /* Clear Descriptor Compare Status */
> -#define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
> -#define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
> -
> -#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
> -				 (((n) & 0x3f) << 2))
> -#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
> -#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
> +#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
> +#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
> +#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
> +#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
> +#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
> +#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
> +#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
> +#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */
> +
> +#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
> +#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
> +#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
> +#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
> +#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
> +#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
> +#define DCSR_EORINTR	BIT(9)	/* The end of Receive */
> +
> +#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
> +#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
> +#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
>  
>  #define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
> -#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
> -
> -#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
> -#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
> -#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
> -#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
> -#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
> -#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
> -#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
> +#define DDADR_STOP	BIT(0)	/* Stop (read / write) */
> +
> +#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
> +#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
> +#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
> +#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
> +#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
> +#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
> +#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
>  #define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
>  #define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
>  #define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
> @@ -132,10 +132,14 @@ struct mmp_pdma_device {
>  	spinlock_t phy_lock; /* protect alloc/free phy channels */
>  };
>  
> -#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
> -#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
> -#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
> -#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
> +#define tx_to_mmp_pdma_desc(tx)					\
> +	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
> +#define to_mmp_pdma_desc(lh)					\
> +	container_of(lh, struct mmp_pdma_desc_sw, node)
> +#define to_mmp_pdma_chan(dchan)					\
> +	container_of(dchan, struct mmp_pdma_chan, chan)
> +#define to_mmp_pdma_dev(dmadev)					\
> +	container_of(dmadev, struct mmp_pdma_device, device)
>  
>  static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
>  {
> @@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
>  	writel(dalgn, phy->base + DALGN);
>  
>  	reg = (phy->idx << 2) + DCSR;
> -	writel(readl(phy->base + reg) | DCSR_RUN,
> -					phy->base + reg);
> +	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
>  }
>  
>  static void disable_chan(struct mmp_pdma_phy *phy)
>  {
>  	u32 reg;
>  
> -	if (phy) {
> -		reg = (phy->idx << 2) + DCSR;
> -		writel(readl(phy->base + reg) & ~DCSR_RUN,
> -						phy->base + reg);
> -	}
> +	if (!phy)
> +		return;
> +
> +	reg = (phy->idx << 2) + DCSR;
> +	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
>  }
>  
>  static int clear_chan_irq(struct mmp_pdma_phy *phy)
> @@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
>  	u32 dint = readl(phy->base + DINT);
>  	u32 reg = (phy->idx << 2) + DCSR;
>  
> -	if (dint & BIT(phy->idx)) {
> -		/* clear irq */
> -		dcsr = readl(phy->base + reg);
> -		writel(dcsr, phy->base + reg);
> -		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
> -			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
> -		return 0;
> -	}
> -	return -EAGAIN;
> +	if (!(dint & BIT(phy->idx)))
> +		return -EAGAIN;
> +
> +	/* clear irq */
> +	dcsr = readl(phy->base + reg);
> +	writel(dcsr, phy->base + reg);
> +	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
> +		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
> +
> +	return 0;
>  }
>  
>  static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
>  {
>  	struct mmp_pdma_phy *phy = dev_id;
>  
> -	if (clear_chan_irq(phy) == 0) {
> -		tasklet_schedule(&phy->vchan->tasklet);
> -		return IRQ_HANDLED;
> -	} else
> +	if (clear_chan_irq(phy) != 0)
>  		return IRQ_NONE;
> +
> +	tasklet_schedule(&phy->vchan->tasklet);
> +	return IRQ_HANDLED;
>  }
>  
>  static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
> @@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
>  
>  	if (irq_num)
>  		return IRQ_HANDLED;
> -	else
> -		return IRQ_NONE;
> +
> +	return IRQ_NONE;
>  }
>  
>  /* lookup free phy channel as descending priority */
> @@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
>  	 */
>  
>  	spin_lock_irqsave(&pdev->phy_lock, flags);
> -	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
> +	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
>  		for (i = 0; i < pdev->dma_channels; i++) {
> -			if (prio != ((i & 0xf) >> 2))
> +			if (prio != (i & 0xf) >> 2)
>  				continue;
>  			phy = &pdev->phy[i];
>  			if (!phy->vchan) {
> @@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
>  	if (chan->desc_pool)
>  		return 1;
>  
> -	chan->desc_pool =
> -		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
> -				  sizeof(struct mmp_pdma_desc_sw),
> -				  __alignof__(struct mmp_pdma_desc_sw), 0);
> +	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
> +					  chan->dev,
> +					  sizeof(struct mmp_pdma_desc_sw),
> +					  __alignof__(struct mmp_pdma_desc_sw),
> +					  0);
>  	if (!chan->desc_pool) {
>  		dev_err(chan->dev, "unable to allocate descriptor pool\n");
>  		return -ENOMEM;
>  	}
> +
>  	mmp_pdma_free_phy(chan);
>  	chan->idle = true;
>  	chan->dev_addr = 0;
> @@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
>  }
>  
>  static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
> -				  struct list_head *list)
> +				    struct list_head *list)
>  {
>  	struct mmp_pdma_desc_sw *desc, *_desc;
>  
> @@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
>  
>  static struct dma_async_tx_descriptor *
>  mmp_pdma_prep_memcpy(struct dma_chan *dchan,
> -	dma_addr_t dma_dst, dma_addr_t dma_src,
> -	size_t len, unsigned long flags)
> +		     dma_addr_t dma_dst, dma_addr_t dma_src,
> +		     size_t len, unsigned long flags)
>  {
>  	struct mmp_pdma_chan *chan;
>  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
> @@ -515,8 +521,8 @@ fail:
>  
>  static struct dma_async_tx_descriptor *
>  mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
> -			 unsigned int sg_len, enum dma_transfer_direction dir,
> -			 unsigned long flags, void *context)
> +		       unsigned int sg_len, enum dma_transfer_direction dir,
> +		       unsigned long flags, void *context)
>  {
>  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
>  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
> @@ -591,10 +597,11 @@ fail:
>  	return NULL;
>  }
>  
> -static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
> -	struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
> -	size_t period_len, enum dma_transfer_direction direction,
> -	unsigned long flags, void *context)
> +static struct dma_async_tx_descriptor *
> +mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
> +			 dma_addr_t buf_addr, size_t len, size_t period_len,
> +			 enum dma_transfer_direction direction,
> +			 unsigned long flags, void *context)
>  {
>  	struct mmp_pdma_chan *chan;
>  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
> @@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
>  			goto fail;
>  		}
>  
> -		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
> -					(DCMD_LENGTH & period_len);
> +		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
> +				  (DCMD_LENGTH & period_len));
>  		new->desc.dsadr = dma_src;
>  		new->desc.dtadr = dma_dst;
>  
> @@ -677,12 +684,11 @@ fail:
>  }
>  
>  static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
> -		unsigned long arg)
> +			    unsigned long arg)
>  {
>  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
>  	struct dma_slave_config *cfg = (void *)arg;
>  	unsigned long flags;
> -	int ret = 0;
>  	u32 maxburst = 0, addr = 0;
>  	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
>  
> @@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
>  		return -ENOSYS;
>  	}
>  
> -	return ret;
> +	return 0;
>  }
>  
>  static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
> -			dma_cookie_t cookie, struct dma_tx_state *txstate)
> +					  dma_cookie_t cookie,
> +					  struct dma_tx_state *txstate)
>  {
>  	return dma_cookie_status(dchan, cookie, txstate);
>  }
> @@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
>  	return 0;
>  }
>  
> -static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
> -							int idx, int irq)
> +static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
>  {
>  	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
>  	struct mmp_pdma_chan *chan;
>  	int ret;
>  
> -	chan = devm_kzalloc(pdev->dev,
> -			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
> +	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
> +			    GFP_KERNEL);
>  	if (chan == NULL)
>  		return -ENOMEM;
>  
> @@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
>  	phy->base = pdev->base;
>  
>  	if (irq) {
> -		ret = devm_request_irq(pdev->dev, irq,
> -			mmp_pdma_chan_handler, 0, "pdma", phy);
> +		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
> +				       "pdma", phy);
>  		if (ret) {
>  			dev_err(pdev->dev, "channel request irq fail!\n");
>  			return ret;
> @@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
>  	INIT_LIST_HEAD(&chan->chain_running);
>  
>  	/* register virt channel to dma engine */
> -	list_add_tail(&chan->chan.device_node,
> -			&pdev->device.channels);
> +	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
>  
>  	return 0;
>  }
> @@ -913,13 +918,12 @@ retry:
>  	 * the lookup and the reservation */
>  	chan = dma_get_slave_channel(candidate);
>  
> -	if (chan) {
> -		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
> -		c->drcmr = dma_spec->args[0];
> -		return chan;
> -	}
> +	if (!chan)
> +		goto retry;
>  
> -	goto retry;
> +	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
> +
> +	return chan;
>  }
>  
>  static int mmp_pdma_probe(struct platform_device *op)
> @@ -934,6 +938,7 @@ static int mmp_pdma_probe(struct platform_device *op)
>  	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
>  	if (!pdev)
>  		return -ENOMEM;
> +
>  	pdev->dev = &op->dev;
>  
>  	spin_lock_init(&pdev->phy_lock);
> @@ -945,8 +950,8 @@ static int mmp_pdma_probe(struct platform_device *op)
>  
>  	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
>  	if (of_id)
> -		of_property_read_u32(pdev->dev->of_node,
> -				"#dma-channels", &dma_channels);
> +		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
> +				     &dma_channels);
>  	else if (pdata && pdata->dma_channels)
>  		dma_channels = pdata->dma_channels;
>  	else
> @@ -958,8 +963,9 @@ static int mmp_pdma_probe(struct platform_device *op)
>  			irq_num++;
>  	}
>  
> -	pdev->phy = devm_kzalloc(pdev->dev,
> -		dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
> +	pdev->phy = devm_kcalloc(pdev->dev,
> +				 dma_channels, sizeof(struct mmp_pdma_chan),
> +				 GFP_KERNEL);
>  	if (pdev->phy == NULL)
>  		return -ENOMEM;
>  
> @@ -968,8 +974,8 @@ static int mmp_pdma_probe(struct platform_device *op)
>  	if (irq_num != dma_channels) {
>  		/* all chan share one irq, demux inside */
>  		irq = platform_get_irq(op, 0);
> -		ret = devm_request_irq(pdev->dev, irq,
> -			mmp_pdma_int_handler, 0, "pdma", pdev);
> +		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
> +				       "pdma", pdev);
>  		if (ret)
>  			return ret;
>  	}
> @@ -1044,7 +1050,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
>  	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
>  		return false;
>  
> -	c->drcmr = *(unsigned int *) param;
> +	c->drcmr = *(unsigned int *)param;
>  
>  	return true;
>  }
> @@ -1052,6 +1058,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
>  
>  module_platform_driver(mmp_pdma_driver);
>  
> -MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
> +MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
>  MODULE_AUTHOR("Marvell International Ltd.");
>  MODULE_LICENSE("GPL v2");
> 
> 
> 

-- 

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2013-11-28 10:32 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-11-17 15:39 [PATCHv7] dmaengine: Add support for BCM2835 Florian Meier
2013-11-17 16:02 ` Joe Perches
2013-11-17 16:37   ` Florian Meier
2013-11-17 20:12     ` [PATCH] mmp_pdma: Style neatening Joe Perches
2013-11-28  9:34       ` Vinod Koul
2013-11-18 10:00 ` [PATCHv7] dmaengine: Add support for BCM2835 Shevchenko, Andriy
2013-11-18 12:16   ` Florian Meier
2013-11-18 14:30     ` Andy Shevchenko
2013-11-18 14:37       ` Florian Meier
2013-11-18 14:54       ` Russell King - ARM Linux
2013-11-18 15:04         ` Mark Rutland
2013-11-18 22:18         ` Joe Perches
2013-11-18 14:41 ` Mark Rutland
