From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
To: Viresh Kumar <viresh.linux@gmail.com>,
	Vinod Koul <vinod.koul@intel.com>,
	linux-kernel@vger.kernel.org
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Subject: [PATCHv2 3/4] dma: move dw_dmac driver into its own directory
Date: Wed, 26 Sep 2012 15:40:36 +0300
Message-ID: <1348663237-3237-4-git-send-email-andriy.shevchenko@linux.intel.com>
In-Reply-To: <1348663237-3237-1-git-send-email-andriy.shevchenko@linux.intel.com>

The dw_dmac driver consists of multiple files. To make managing them more
convenient, move it into its own directory.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
---
 drivers/dma/Makefile          |    3 +-
 drivers/dma/dw/Makefile       |    2 +
 drivers/dma/dw/dw_dmac.c      | 1720 +++++++++++++++++++++++++++++++++++++++++
 drivers/dma/dw/dw_dmac_pci.c  |  127 +++
 drivers/dma/dw/dw_dmac_regs.h |  294 +++++++
 drivers/dma/dw_dmac.c         | 1720 -----------------------------------------
 drivers/dma/dw_dmac_pci.c     |  127 ---
 drivers/dma/dw_dmac_regs.h    |  294 -------
 8 files changed, 2144 insertions(+), 2143 deletions(-)
 create mode 100644 drivers/dma/dw/Makefile
 create mode 100644 drivers/dma/dw/dw_dmac.c
 create mode 100644 drivers/dma/dw/dw_dmac_pci.c
 create mode 100644 drivers/dma/dw/dw_dmac_regs.h
 delete mode 100644 drivers/dma/dw_dmac.c
 delete mode 100644 drivers/dma/dw_dmac_pci.c
 delete mode 100644 drivers/dma/dw_dmac_regs.h

diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 15eef5f..122a48a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -11,8 +11,7 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_DW_DMAC) += dw_dmac.o
-obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
+obj-$(CONFIG_DW_DMAC) += dw/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
new file mode 100644
index 0000000..2edfb24
--- /dev/null
+++ b/drivers/dma/dw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
diff --git a/drivers/dma/dw/dw_dmac.c b/drivers/dma/dw/dw_dmac.c
new file mode 100644
index 0000000..fa0471a
--- /dev/null
+++ b/drivers/dma/dw/dw_dmac.c
@@ -0,0 +1,1720 @@
+/*
+ * Core driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+#include "dw_dmac_regs.h"
+
+/*
+ * This supports the Synopsys "DesignWare AHB Central DMA Controller",
+ * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
+ * of which use ARM any more).  See the "Databook" from Synopsys for
+ * information beyond what licensees probably provide.
+ *
+ * The driver has currently been tested only with the Atmel AT32AP7000,
+ * which does not support descriptor writeback.
+ */
+
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->dst_master : 0;
+}
+
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+	return slave ? slave->src_master : 1;
+}
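+
+/*
+ * Without controller-specific slave data these helpers fall back to the
+ * memory-side defaults: master 0 for the destination and master 1 for
+ * the source.
+ */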
+
+#define DWC_DEFAULT_CTLLO(_chan) ({				\
+		struct dw_dma_slave *__slave = (_chan->private);	\
+		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
+		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
+		int _dms = dwc_get_dms(__slave);		\
+		int _sms = dwc_get_sms(__slave);		\
+		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
+			DW_DMA_MSIZE_16;			\
+		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
+			DW_DMA_MSIZE_16;			\
+								\
+		(DWC_CTLL_DST_MSIZE(_dmsize)			\
+		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
+		 | DWC_CTLL_LLP_D_EN				\
+		 | DWC_CTLL_LLP_S_EN				\
+		 | DWC_CTLL_DMS(_dms)				\
+		 | DWC_CTLL_SMS(_sms));				\
+	})
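+
+/*
+ * The resulting CTL_LO template carries the burst sizes (taken from the
+ * slave config, or DW_DMA_MSIZE_16 for plain memcpy), enables LLP block
+ * chaining on both ends and selects the AHB masters; transfer widths,
+ * address increment mode and flow control are OR'ed in by the prep
+ * routines below.
+ */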
+
+/*
+ * Number of descriptors to allocate for each channel. This should be
+ * made configurable somehow; preferably, the clients (at least the
+ * ones using slave transfers) should be able to give us a hint.
+ */
+#define NR_DESCS_PER_CHANNEL	64
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Because we're not relying on writeback from the controller (it may not
+ * even be configured into the core!) we don't need to use dma_pool.  These
+ * descriptors -- and associated data -- are cacheable.  We do need to make
+ * sure their dcache entries are written back before handing them off to
+ * the controller, though.
+ */
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+	return chan->dev->device.parent;
+}
+
+static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+{
+	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+}
+
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+	struct dw_desc *desc, *_desc;
+	struct dw_desc *ret = NULL;
+	unsigned int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+		i++;
+		if (async_tx_test_ack(&desc->txd)) {
+			list_del(&desc->desc_node);
+			ret = desc;
+			break;
+		}
+		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
+
+	return ret;
+}
+
+static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+	struct dw_desc	*child;
+
+	list_for_each_entry(child, &desc->tx_list, desc_node)
+		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
+				child->txd.phys, sizeof(child->lli),
+				DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
+			desc->txd.phys, sizeof(desc->lli),
+			DMA_TO_DEVICE);
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+	unsigned long flags;
+
+	if (desc) {
+		struct dw_desc *child;
+
+		dwc_sync_desc_for_cpu(dwc, desc);
+
+		spin_lock_irqsave(&dwc->lock, flags);
+		list_for_each_entry(child, &desc->tx_list, desc_node)
+			dev_vdbg(chan2dev(&dwc->chan),
+					"moving child desc %p to freelist\n",
+					child);
+		list_splice_init(&desc->tx_list, &dwc->free_list);
+		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
+		list_add(&desc->desc_node, &dwc->free_list);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	}
+}
+
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_dma_slave *dws = dwc->chan.private;
+	u32 cfghi = DWC_CFGH_FIFO_MODE;
+	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+	if (dwc->initialized == true)
+		return;
+
+	if (dws) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+		cfghi = dws->cfg_hi;
+		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+	} else {
+		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
+	}
+
+	channel_writel(dwc, CFG_LO, cfglo);
+	channel_writel(dwc, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+	dwc->initialized = true;
+}
+
+/*----------------------------------------------------------------------*/
+
+static inline unsigned int dwc_fast_fls(unsigned long long v)
+{
+	/*
+	 * We can be a lot more clever here, but this should take care
+	 * of the most common optimization.
+	 */
+	if (!(v & 7))
+		return 3;
+	else if (!(v & 3))
+		return 2;
+	else if (!(v & 1))
+		return 1;
+	return 0;
+}
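+
+/*
+ * For example, src = 0x1000, dest = 0x2000 and len = 0x40 give
+ * (src | dest | len) = 0x3040, a multiple of 8, so dwc_fast_fls()
+ * returns 3, i.e. 2^3 = 8 byte wide transfers.
+ */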
+
+static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+{
+	dev_err(chan2dev(&dwc->chan),
+		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+		channel_readl(dwc, SAR),
+		channel_readl(dwc, DAR),
+		channel_readl(dwc, LLP),
+		channel_readl(dwc, CTL_HI),
+		channel_readl(dwc, CTL_LO));
+}
+
+static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+}
+
+/*----------------------------------------------------------------------*/
+
+/* Perform single block transfer */
+static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
+				       struct dw_desc *desc)
+{
+	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
+	u32		ctllo;
+
+	/*
+	 * Software emulation of LLP mode relies on interrupts to continue
+	 * multi-block transfers.
+	 */
+	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+
+	channel_writel(dwc, SAR, desc->lli.sar);
+	channel_writel(dwc, DAR, desc->lli.dar);
+	channel_writel(dwc, CTL_LO, ctllo);
+	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+	channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
+/* Called with dwc->lock held and bh disabled */
+static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+{
+	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
+	unsigned long	was_soft_llp;
+
+	/* ASSERT:  channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dwc_dump_chan_regs(dwc);
+
+		/* The tasklet will hopefully advance the queue... */
+		return;
+	}
+
+	if (dwc->nollp) {
+		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
+						&dwc->flags);
+		if (was_soft_llp) {
+			dev_err(chan2dev(&dwc->chan),
+				"BUG: Attempted to start new LLP transfer "
+				"inside ongoing one\n");
+			return;
+		}
+
+		dwc_initialize(dwc);
+
+		dwc->tx_list = &first->tx_list;
+		dwc->tx_node_active = first->tx_list.next;
+
+		dwc_do_single_block(dwc, first);
+
+		return;
+	}
+
+	dwc_initialize(dwc);
+
+	channel_writel(dwc, LLP, first->txd.phys);
+	channel_writel(dwc, CTL_LO,
+			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+	channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+		bool callback_required)
+{
+	dma_async_tx_callback		callback = NULL;
+	void				*param = NULL;
+	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct dw_desc			*child;
+	unsigned long			flags;
+
+	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	dma_cookie_complete(txd);
+	if (callback_required) {
+		callback = txd->callback;
+		param = txd->callback_param;
+	}
+
+	dwc_sync_desc_for_cpu(dwc, desc);
+
+	/* async_tx_ack */
+	list_for_each_entry(child, &desc->tx_list, desc_node)
+		async_tx_ack(&child->txd);
+	async_tx_ack(&desc->txd);
+
+	list_splice_init(&desc->tx_list, &dwc->free_list);
+	list_move(&desc->desc_node, &dwc->free_list);
+
+	if (!dwc->chan.private) {
+		struct device *parent = chan2parent(&dwc->chan);
+		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+				dma_unmap_single(parent, desc->lli.dar,
+						desc->len, DMA_FROM_DEVICE);
+			else
+				dma_unmap_page(parent, desc->lli.dar,
+						desc->len, DMA_FROM_DEVICE);
+		}
+		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+				dma_unmap_single(parent, desc->lli.sar,
+						desc->len, DMA_TO_DEVICE);
+			else
+				dma_unmap_page(parent, desc->lli.sar,
+						desc->len, DMA_TO_DEVICE);
+		}
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	if (callback_required && callback)
+		callback(param);
+}
+
+static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	struct dw_desc *desc, *_desc;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: XFER bit set, but channel not idle!\n");
+
+		/* Try to continue after resetting the channel... */
+		dwc_chan_disable(dw, dwc);
+	}
+
+	/*
+	 * Submit queued descriptors ASAP, i.e. before we go through
+	 * the completed ones.
+	 */
+	list_splice_init(&dwc->active_list, &list);
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node)
+		dwc_descriptor_complete(dwc, desc, true);
+}
+
+static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	dma_addr_t llp;
+	struct dw_desc *desc, *_desc;
+	struct dw_desc *child;
+	u32 status_xfer;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	llp = channel_readl(dwc, LLP);
+	status_xfer = dma_readl(dw, RAW.XFER);
+
+	if (status_xfer & dwc->mask) {
+		/* Everything we've submitted is done */
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		dwc_complete_all(dw, dwc);
+		return;
+	}
+
+	if (list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+			(unsigned long long)llp);
+
+	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* check first descriptor's addr */
+		if (desc->txd.phys == llp) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		/* check first descriptor's llp */
+		if (desc->lli.llp == llp) {
+			/* This one is currently in progress */
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		list_for_each_entry(child, &desc->tx_list, desc_node)
+			if (child->lli.llp == llp) {
+				/* Currently in progress */
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+
+		/*
+		 * No descriptors so far seem to be in progress, i.e.
+		 * this one must be done.
+		 */
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc_descriptor_complete(dwc, desc, true);
+		spin_lock_irqsave(&dwc->lock, flags);
+	}
+
+	dev_err(chan2dev(&dwc->chan),
+		"BUG: All descriptors done, but channel not idle!\n");
+
+	/* Try to continue after resetting the channel... */
+	dwc_chan_disable(dw, dwc);
+
+	if (!list_empty(&dwc->queue)) {
+		list_move(dwc->queue.next, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+{
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
+			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+}
+
+static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+	struct dw_desc *bad_desc;
+	struct dw_desc *child;
+	unsigned long flags;
+
+	dwc_scan_descriptors(dw, dwc);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * borked. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to carry on.
+	 */
+	bad_desc = dwc_first_active(dwc);
+	list_del_init(&bad_desc->desc_node);
+	list_move(dwc->queue.next, dwc->active_list.prev);
+
+	/* Clear the error flag and try to restart the controller */
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	if (!list_empty(&dwc->active_list))
+		dwc_dostart(dwc, dwc_first_active(dwc));
+
+	/*
+	 * KERN_CRIT may seem harsh, but since this only happens
+	 * when someone submits a bad physical address in a
+	 * descriptor, we should consider ourselves lucky that the
+	 * controller flagged an error instead of scribbling over
+	 * random memory locations.
+	 */
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
+			"Bad descriptor submitted for DMA!\n");
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
+			"  cookie: %d\n", bad_desc->txd.cookie);
+	dwc_dump_lli(dwc, &bad_desc->lli);
+	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
+		dwc_dump_lli(dwc, &child->lli);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	/* Pretend the descriptor completed successfully */
+	dwc_descriptor_complete(dwc, bad_desc, true);
+}
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+		u32 status_err, u32 status_xfer)
+{
+	unsigned long flags;
+
+	if (dwc->mask) {
+		void (*callback)(void *param);
+		void *callback_param;
+
+		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+				channel_readl(dwc, LLP));
+
+		callback = dwc->cdesc->period_callback;
+		callback_param = dwc->cdesc->period_callback_param;
+
+		if (callback)
+			callback(callback_param);
+	}
+
+	/*
+	 * Error and transfer complete are highly unlikely, and will most
+	 * likely be due to a configuration error by the user.
+	 */
+	if (unlikely(status_err & dwc->mask) ||
+			unlikely(status_xfer & dwc->mask)) {
+		int i;
+
+		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+				"interrupt, stopping DMA transfer\n",
+				status_xfer ? "xfer" : "error");
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		dwc_dump_chan_regs(dwc);
+
+		dwc_chan_disable(dw, dwc);
+
+		/* make sure DMA does not restart by loading a new list */
+		channel_writel(dwc, LLP, 0);
+		channel_writel(dwc, CTL_LO, 0);
+		channel_writel(dwc, CTL_HI, 0);
+
+		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		for (i = 0; i < dwc->cdesc->periods; i++)
+			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void dw_dma_tasklet(unsigned long data)
+{
+	struct dw_dma *dw = (struct dw_dma *)data;
+	struct dw_dma_chan *dwc;
+	u32 status_xfer;
+	u32 status_err;
+	int i;
+
+	status_xfer = dma_readl(dw, RAW.XFER);
+	status_err = dma_readl(dw, RAW.ERROR);
+
+	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
+
+	for (i = 0; i < dw->dma.chancnt; i++) {
+		dwc = &dw->chan[i];
+		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+		else if (status_err & (1 << i))
+			dwc_handle_error(dw, dwc);
+		else if (status_xfer & (1 << i)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&dwc->lock, flags);
+			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+				if (dwc->tx_node_active != dwc->tx_list) {
+					struct dw_desc *desc =
+						list_entry(dwc->tx_node_active,
+							   struct dw_desc,
+							   desc_node);
+
+					dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+					/* move pointer to next descriptor */
+					dwc->tx_node_active =
+						dwc->tx_node_active->next;
+
+					dwc_do_single_block(dwc, desc);
+
+					spin_unlock_irqrestore(&dwc->lock, flags);
+					continue;
+				} else {
+					/* we are done here */
+					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+				}
+			}
+			spin_unlock_irqrestore(&dwc->lock, flags);
+
+			dwc_scan_descriptors(dw, dwc);
+		}
+	}
+
+	/*
+	 * Re-enable interrupts.
+	 */
+	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+}
+
+static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+{
+	struct dw_dma *dw = dev_id;
+	u32 status;
+
+	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
+			dma_readl(dw, STATUS_INT));
+
+	/*
+	 * Just disable the interrupts. We'll turn them back on in the
+	 * softirq handler.
+	 */
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	status = dma_readl(dw, STATUS_INT);
+	if (status) {
+		dev_err(dw->dma.dev,
+			"BUG: Unexpected interrupts pending: 0x%x\n",
+			status);
+
+		/* Try to recover */
+		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+	}
+
+	tasklet_schedule(&dw->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct dw_desc		*desc = txd_to_dw_desc(tx);
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
+	dma_cookie_t		cookie;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	cookie = dma_cookie_assign(tx);
+
+	/*
+	 * REVISIT: We should attempt to chain as many descriptors as
+	 * possible, perhaps even appending to those already submitted
+	 * for DMA. But this is hard to do in a race-free manner.
+	 */
+	if (list_empty(&dwc->active_list)) {
+		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
+				desc->txd.cookie);
+		list_add_tail(&desc->desc_node, &dwc->active_list);
+		dwc_dostart(dwc, dwc_first_active(dwc));
+	} else {
+		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
+				desc->txd.cookie);
+
+		list_add_tail(&desc->desc_node, &dwc->queue);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma_slave	*dws = chan->private;
+	struct dw_desc		*desc;
+	struct dw_desc		*first;
+	struct dw_desc		*prev;
+	size_t			xfer_count;
+	size_t			offset;
+	unsigned int		src_width;
+	unsigned int		dst_width;
+	unsigned int		data_width;
+	u32			ctllo;
+
+	dev_vdbg(chan2dev(chan),
+			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+			(unsigned long long)dest, (unsigned long long)src,
+			len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
+					 dwc->dw->data_width[dwc_get_dms(dws)]);
+
+	src_width = dst_width = min_t(unsigned int, data_width,
+				      dwc_fast_fls(src | dest | len));
+
+	ctllo = DWC_DEFAULT_CTLLO(chan)
+			| DWC_CTLL_DST_WIDTH(dst_width)
+			| DWC_CTLL_SRC_WIDTH(src_width)
+			| DWC_CTLL_DST_INC
+			| DWC_CTLL_SRC_INC
+			| DWC_CTLL_FC_M2M;
+	prev = first = NULL;
+
+	for (offset = 0; offset < len; offset += xfer_count << src_width) {
+		xfer_count = min_t(size_t, (len - offset) >> src_width,
+					   dwc->block_size);
+
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto err_desc_get;
+
+		desc->lli.sar = src + offset;
+		desc->lli.dar = dest + offset;
+		desc->lli.ctllo = ctllo;
+		desc->lli.ctlhi = xfer_count;
+
+		if (!first) {
+			first = desc;
+		} else {
+			prev->lli.llp = desc->txd.phys;
+			dma_sync_single_for_device(chan2parent(chan),
+					prev->txd.phys, sizeof(prev->lli),
+					DMA_TO_DEVICE);
+			list_add_tail(&desc->desc_node,
+					&first->tx_list);
+		}
+		prev = desc;
+	}
+
+
+	if (flags & DMA_PREP_INTERRUPT)
+		/* Trigger interrupt after last block */
+		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+	prev->lli.llp = 0;
+	dma_sync_single_for_device(chan2parent(chan),
+			prev->txd.phys, sizeof(prev->lli),
+			DMA_TO_DEVICE);
+
+	first->txd.flags = flags;
+	first->len = len;
+
+	return &first->txd;
+
+err_desc_get:
+	dwc_desc_put(dwc, first);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma_slave	*dws = chan->private;
+	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
+	struct dw_desc		*prev;
+	struct dw_desc		*first;
+	u32			ctllo;
+	dma_addr_t		reg;
+	unsigned int		reg_width;
+	unsigned int		mem_width;
+	unsigned int		data_width;
+	unsigned int		i;
+	struct scatterlist	*sg;
+	size_t			total_len = 0;
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	if (unlikely(!dws || !sg_len))
+		return NULL;
+
+	prev = first = NULL;
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		reg_width = __fls(sconfig->dst_addr_width);
+		reg = sconfig->dst_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_DST_FIX
+				| DWC_CTLL_SRC_INC);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
+		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct dw_desc	*desc;
+			u32		len, dlen, mem;
+
+			mem = sg_dma_address(sg);
+			len = sg_dma_len(sg);
+
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
+
+slave_sg_todev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+					"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
+			desc->lli.sar = mem;
+			desc->lli.dar = reg;
+			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+			if ((len >> mem_width) > dwc->block_size) {
+				dlen = dwc->block_size << mem_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+
+			desc->lli.ctlhi = dlen >> mem_width;
+
+			if (!first) {
+				first = desc;
+			} else {
+				prev->lli.llp = desc->txd.phys;
+				dma_sync_single_for_device(chan2parent(chan),
+						prev->txd.phys,
+						sizeof(prev->lli),
+						DMA_TO_DEVICE);
+				list_add_tail(&desc->desc_node,
+						&first->tx_list);
+			}
+			prev = desc;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_todev_fill_desc;
+		}
+		break;
+	case DMA_DEV_TO_MEM:
+		reg_width = __fls(sconfig->src_addr_width);
+		reg = sconfig->src_addr;
+		ctllo = (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_INC
+				| DWC_CTLL_SRC_FIX);
+
+		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
+		data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+
+		for_each_sg(sgl, sg, sg_len, i) {
+			struct dw_desc	*desc;
+			u32		len, dlen, mem;
+
+			mem = sg_dma_address(sg);
+			len = sg_dma_len(sg);
+
+			mem_width = min_t(unsigned int,
+					  data_width, dwc_fast_fls(mem | len));
+
+slave_sg_fromdev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+						"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
+			desc->lli.sar = reg;
+			desc->lli.dar = mem;
+			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+			if ((len >> reg_width) > dwc->block_size) {
+				dlen = dwc->block_size << reg_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+			desc->lli.ctlhi = dlen >> reg_width;
+
+			if (!first) {
+				first = desc;
+			} else {
+				prev->lli.llp = desc->txd.phys;
+				dma_sync_single_for_device(chan2parent(chan),
+						prev->txd.phys,
+						sizeof(prev->lli),
+						DMA_TO_DEVICE);
+				list_add_tail(&desc->desc_node,
+						&first->tx_list);
+			}
+			prev = desc;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_fromdev_fill_desc;
+		}
+		break;
+	default:
+		return NULL;
+	}
+
+	if (flags & DMA_PREP_INTERRUPT)
+		/* Trigger interrupt after last block */
+		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+	prev->lli.llp = 0;
+	dma_sync_single_for_device(chan2parent(chan),
+			prev->txd.phys, sizeof(prev->lli),
+			DMA_TO_DEVICE);
+
+	first->len = total_len;
+
+	return &first->txd;
+
+err_desc_get:
+	dwc_desc_put(dwc, first);
+	return NULL;
+}
+
+/*
+ * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * NOTE: burst size 2 is not supported by controller.
+ *
+ * This can be done by finding the most significant bit set: fls(n) - 2,
+ * e.g. fls(8) - 2 = 2; sizes of 1 and below map to 0.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+	if (*maxburst > 1)
+		*maxburst = fls(*maxburst) - 2;
+	else
+		*maxburst = 0;
+}
+
+static int
+set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+	/* Check if chan is configured for slave transfers */
+	if (!chan->private)
+		return -EINVAL;
+
+	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+	convert_burst(&dwc->dma_sconfig.src_maxburst);
+	convert_burst(&dwc->dma_sconfig.dst_maxburst);
+
+	return 0;
+}
+
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		       unsigned long arg)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc, *_desc;
+	unsigned long		flags;
+	u32			cfglo;
+	LIST_HEAD(list);
+
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
+
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+
+		dwc_chan_disable(dw, dwc);
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else if (cmd == DMA_SLAVE_CONFIG) {
+		return set_runtime_config(chan, (struct dma_slave_config *)arg);
+	} else {
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static enum dma_status
+dwc_tx_status(struct dma_chan *chan,
+	      dma_cookie_t cookie,
+	      struct dma_tx_state *txstate)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	enum dma_status		ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS) {
+		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+
+		ret = dma_cookie_status(chan, cookie, txstate);
+	}
+
+	if (ret != DMA_SUCCESS)
+		dma_set_residue(txstate, dwc_first_active(dwc)->len);
+
+	if (dwc->paused)
+		return DMA_PAUSED;
+
+	return ret;
+}
+
+static void dwc_issue_pending(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+
+	if (!list_empty(&dwc->queue))
+		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+}
+
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc;
+	int			i;
+	unsigned long		flags;
+
+	dev_vdbg(chan2dev(chan), "%s\n", __func__);
+
+	/* ASSERT:  channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+		return -EIO;
+	}
+
+	dma_cookie_init(chan);
+
+	/*
+	 * NOTE: some controllers may have additional features that we
+	 * need to initialize here, like "scatter-gather" (which
+	 * doesn't mean what you think it means), and status writeback.
+	 */
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	i = dwc->descs_allocated;
+	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
+		if (!desc) {
+			dev_info(chan2dev(chan),
+				"only allocated %d descriptors\n", i);
+			spin_lock_irqsave(&dwc->lock, flags);
+			break;
+		}
+
+		INIT_LIST_HEAD(&desc->tx_list);
+		dma_async_tx_descriptor_init(&desc->txd, chan);
+		desc->txd.tx_submit = dwc_tx_submit;
+		desc->txd.flags = DMA_CTRL_ACK;
+		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
+				sizeof(desc->lli), DMA_TO_DEVICE);
+		dwc_desc_put(dwc, desc);
+
+		spin_lock_irqsave(&dwc->lock, flags);
+		i = ++dwc->descs_allocated;
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+	return i;
+}
+
+static void dwc_free_chan_resources(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
+	struct dw_desc		*desc, *_desc;
+	unsigned long		flags;
+	LIST_HEAD(list);
+
+	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
+			dwc->descs_allocated);
+
+	/* ASSERT:  channel is idle */
+	BUG_ON(!list_empty(&dwc->active_list));
+	BUG_ON(!list_empty(&dwc->queue));
+	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	list_splice_init(&dwc->free_list, &list);
+	dwc->descs_allocated = 0;
+	dwc->initialized = false;
+
+	/* Disable interrupts */
+	channel_clear_bit(dw, MASK.XFER, dwc->mask);
+	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
+		dma_unmap_single(chan2parent(chan), desc->txd.phys,
+				sizeof(desc->lli), DMA_TO_DEVICE);
+		kfree(desc);
+	}
+
+	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
+}
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	unsigned long		flags;
+
+	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* assert channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dwc_dump_chan_regs(dwc);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return -EBUSY;
+	}
+
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	/* setup DMAC channel registers */
+	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+
+	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	unsigned long		flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	dwc_chan_disable(dw, dwc);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_transfer_direction direction)
+{
+	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
+	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
+	struct dw_cyclic_desc		*cdesc;
+	struct dw_cyclic_desc		*retval = NULL;
+	struct dw_desc			*desc;
+	struct dw_desc			*last = NULL;
+	unsigned long			was_cyclic;
+	unsigned int			reg_width;
+	unsigned int			periods;
+	unsigned int			i;
+	unsigned long			flags;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->nollp) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel doesn't support LLP transfers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dev_dbg(chan2dev(&dwc->chan),
+				"queue and/or active list are not empty\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel already prepared for cyclic DMA\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	retval = ERR_PTR(-EINVAL);
+
+	if (direction == DMA_MEM_TO_DEV)
+		reg_width = __ffs(sconfig->dst_addr_width);
+	else
+		reg_width = __ffs(sconfig->src_addr_width);
+
+	periods = buf_len / period_len;
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer. */
+	if (period_len > (dwc->block_size << reg_width))
+		goto out_err;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
+		goto out_err;
+
+	retval = ERR_PTR(-ENOMEM);
+
+	if (periods > NR_DESCS_PER_CHANNEL)
+		goto out_err;
+
+	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+	if (!cdesc)
+		goto out_err;
+
+	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+	if (!cdesc->desc)
+		goto out_err_alloc;
+
+	for (i = 0; i < periods; i++) {
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto out_err_desc_get;
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->lli.dar = sconfig->dst_addr;
+			desc->lli.sar = buf_addr + (period_len * i);
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_FIX
+					| DWC_CTLL_SRC_INC
+					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->lli.dar = buf_addr + (period_len * i);
+			desc->lli.sar = sconfig->src_addr;
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_DST_INC
+					| DWC_CTLL_SRC_FIX
+					| DWC_CTLL_INT_EN);
+
+			desc->lli.ctllo |= sconfig->device_fc ?
+				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+
+			break;
+		default:
+			break;
+		}
+
+		desc->lli.ctlhi = (period_len >> reg_width);
+		cdesc->desc[i] = desc;
+
+		if (last) {
+			last->lli.llp = desc->txd.phys;
+			dma_sync_single_for_device(chan2parent(chan),
+					last->txd.phys, sizeof(last->lli),
+					DMA_TO_DEVICE);
+		}
+
+		last = desc;
+	}
+
+	/* let's make a cyclic list */
+	last->lli.llp = cdesc->desc[0]->txd.phys;
+	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+			sizeof(last->lli), DMA_TO_DEVICE);
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+			"period %zu periods %d\n", (unsigned long long)buf_addr,
+			buf_len, period_len, periods);
+
+	cdesc->periods = periods;
+	dwc->cdesc = cdesc;
+
+	return cdesc;
+
+out_err_desc_get:
+	while (i--)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+	kfree(cdesc);
+out_err:
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
+	int			i;
+	unsigned long		flags;
+
+	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
+
+	if (!cdesc)
+		return;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	dwc_chan_disable(dw, dwc);
+
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	for (i = 0; i < cdesc->periods; i++)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+
+	kfree(cdesc->desc);
+	kfree(cdesc);
+
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+	int i;
+
+	dma_writel(dw, CFG, 0);
+
+	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+		cpu_relax();
+
+	for (i = 0; i < dw->dma.chancnt; i++)
+		dw->chan[i].initialized = false;
+}
+
+static int __devinit dw_probe(struct platform_device *pdev)
+{
+	struct dw_dma_platform_data *pdata;
+	struct resource		*io;
+	struct dw_dma		*dw;
+	size_t			size;
+	void __iomem		*regs;
+	bool			autocfg;
+	unsigned int		dw_params;
+	unsigned int		nr_channels;
+	unsigned int		max_blk_size = 0;
+	int			irq;
+	int			err;
+	int			i;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return -EINVAL;
+
+	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!io)
+		return -EINVAL;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	regs = devm_request_and_ioremap(&pdev->dev, io);
+	if (!regs)
+		return -EBUSY;
+
+	dw_params = dma_read_byaddr(regs, DW_PARAMS);
+	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+
+	if (autocfg)
+		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
+	else
+		nr_channels = pdata->nr_channels;
+
+	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
+	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	dw->clk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(dw->clk))
+		return PTR_ERR(dw->clk);
+	clk_prepare_enable(dw->clk);
+
+	dw->regs = regs;
+
+	/* get hardware configuration parameters */
+	if (autocfg) {
+		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+
+		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+		for (i = 0; i < dw->nr_masters; i++) {
+			dw->data_width[i] =
+				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+		}
+	} else {
+		dw->nr_masters = pdata->nr_masters;
+		memcpy(dw->data_width, pdata->data_width, 4);
+	}
+
+	/* Calculate all channel mask before DMA setup */
+	dw->all_chan_mask = (1 << nr_channels) - 1;
+
+	/* force dma off, just in case */
+	dw_dma_off(dw);
+
+	/* disable BLOCK interrupts as well */
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+
+	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+			       "dw_dmac", dw);
+	if (err)
+		return err;
+
+	platform_set_drvdata(pdev, dw);
+
+	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+
+	INIT_LIST_HEAD(&dw->dma.channels);
+	for (i = 0; i < nr_channels; i++) {
+		struct dw_dma_chan	*dwc = &dw->chan[i];
+		int			r = nr_channels - i - 1;
+
+		dwc->chan.device = &dw->dma;
+		dma_cookie_init(&dwc->chan);
+		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+			list_add_tail(&dwc->chan.device_node,
+					&dw->dma.channels);
+		else
+			list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+		/* 7 is highest priority & 0 is lowest. */
+		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+			dwc->priority = r;
+		else
+			dwc->priority = i;
+
+		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+		spin_lock_init(&dwc->lock);
+		dwc->mask = 1 << i;
+
+		INIT_LIST_HEAD(&dwc->active_list);
+		INIT_LIST_HEAD(&dwc->queue);
+		INIT_LIST_HEAD(&dwc->free_list);
+
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+
+		dwc->dw = dw;
+
+		/* hardware configuration */
+		if (autocfg) {
+			unsigned int dwc_params;
+
+			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
+						     DWC_PARAMS);
+
+			/*
+			 * Decode maximum block size for given channel. The
+			 * stored 4-bit value represents blocks from 0x00 for 3
+			 * up to 0x0a for 4095.
+			 */
+			dwc->block_size =
+				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+			dwc->nollp =
+				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
+		} else {
+			dwc->block_size = pdata->block_size;
+
+			/* Check if channel supports multi block transfer */
+			channel_writel(dwc, LLP, 0xfffffffc);
+			dwc->nollp =
+				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+			channel_writel(dwc, LLP, 0);
+		}
+	}
+
+	/* Clear all interrupts on all channels. */
+	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+
+	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+	if (pdata->is_private)
+		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+	dw->dma.dev = &pdev->dev;
+	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+
+	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+
+	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+	dw->dma.device_control = dwc_control;
+
+	dw->dma.device_tx_status = dwc_tx_status;
+	dw->dma.device_issue_pending = dwc_issue_pending;
+
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
+			dev_name(&pdev->dev), nr_channels);
+
+	dma_async_device_register(&dw->dma);
+
+	return 0;
+}
+
+static int __devexit dw_remove(struct platform_device *pdev)
+{
+	struct dw_dma		*dw = platform_get_drvdata(pdev);
+	struct dw_dma_chan	*dwc, *_dwc;
+
+	dw_dma_off(dw);
+	dma_async_device_unregister(&dw->dma);
+
+	tasklet_kill(&dw->tasklet);
+
+	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+			chan.device_node) {
+		list_del(&dwc->chan.device_node);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+	}
+
+	return 0;
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+	struct dw_dma	*dw = platform_get_drvdata(pdev);
+
+	dw_dma_off(platform_get_drvdata(pdev));
+	clk_disable_unprepare(dw->clk);
+}
+
+static int dw_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_dma	*dw = platform_get_drvdata(pdev);
+
+	dw_dma_off(platform_get_drvdata(pdev));
+	clk_disable_unprepare(dw->clk);
+
+	return 0;
+}
+
+static int dw_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dw_dma	*dw = platform_get_drvdata(pdev);
+
+	clk_prepare_enable(dw->clk);
+	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+	return 0;
+}
+
+static const struct dev_pm_ops dw_dev_pm_ops = {
+	.suspend_noirq = dw_suspend_noirq,
+	.resume_noirq = dw_resume_noirq,
+	.freeze_noirq = dw_suspend_noirq,
+	.thaw_noirq = dw_resume_noirq,
+	.restore_noirq = dw_resume_noirq,
+	.poweroff_noirq = dw_suspend_noirq,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+	{ .compatible = "snps,dma-spear1340" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
+static struct platform_driver dw_driver = {
+	.probe		= dw_probe,
+	.remove		= __devexit_p(dw_remove),
+	.shutdown	= dw_shutdown,
+	.driver = {
+		.name	= "dw_dmac",
+		.pm	= &dw_dev_pm_ops,
+		.of_match_table = of_match_ptr(dw_dma_id_table),
+	},
+};
+
+module_platform_driver(dw_driver);
+
+MODULE_ALIAS("platform:dw_dmac");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/dw/dw_dmac_pci.c b/drivers/dma/dw/dw_dmac_pci.c
new file mode 100644
index 0000000..95570df
--- /dev/null
+++ b/drivers/dma/dw/dw_dmac_pci.c
@@ -0,0 +1,127 @@
+/*
+ * PCI driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dw_dmac.h>
+
+#define DRIVER(_is_private, _chan_order, _chan_pri)		\
+	((kernel_ulong_t)&(struct dw_dma_platform_data) {	\
+		.is_private = (_is_private),			\
+		.chan_allocation_order = (_chan_order),		\
+		.chan_priority = (_chan_pri),			\
+	})
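+
+/*
+ * The pointer to this compound literal is carried in the PCI id table's
+ * driver_data field and copied to the "dw_dmac" platform device as
+ * platform data in dw_pci_probe() below.
+ */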
+
+static int __devinit dw_pci_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+{
+	struct platform_device *pd;
+	struct resource r[2];
+	struct dw_dma_platform_data *driver = (void *)id->driver_data;
+	static int instance;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		goto err0;
+
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		goto err0;
+
+	pd = platform_device_alloc("dw_dmac", instance);
+	if (!pd) {
+		dev_err(&pdev->dev, "can't allocate dw_dmac platform device\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	memset(r, 0, sizeof(r));
+
+	r[0].start = pci_resource_start(pdev, 0);
+	r[0].end = pci_resource_end(pdev, 0);
+	r[0].flags = IORESOURCE_MEM;
+
+	r[1].start = pdev->irq;
+	r[1].flags = IORESOURCE_IRQ;
+
+	ret = platform_device_add_resources(pd, r, ARRAY_SIZE(r));
+	if (ret) {
+		dev_err(&pdev->dev, "can't add resources to platform device\n");
+		goto err1;
+	}
+
+	ret = platform_device_add_data(pd, driver, sizeof(*driver));
+	if (ret)
+		goto err1;
+
+	dma_set_coherent_mask(&pd->dev, pdev->dev.coherent_dma_mask);
+	pd->dev.dma_mask = pdev->dev.dma_mask;
+	pd->dev.dma_parms = pdev->dev.dma_parms;
+	pd->dev.parent = &pdev->dev;
+
+	pci_set_drvdata(pdev, pd);
+
+	ret = platform_device_add(pd);
+	if (ret) {
+		dev_err(&pdev->dev, "platform_device_add failed\n");
+		goto err1;
+	}
+
+	instance++;
+	return 0;
+
+err1:
+	platform_device_put(pd);
+err0:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+
+static void __devexit dw_pci_remove(struct pci_dev *pdev)
+{
+	struct platform_device *pd = pci_get_drvdata(pdev);
+
+	platform_device_unregister(pd);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+	{ PCI_VDEVICE(INTEL, 0x0827), DRIVER(1, 0, 0) },
+	{ PCI_VDEVICE(INTEL, 0x0830), DRIVER(1, 0, 0) },
+	{ PCI_VDEVICE(INTEL, 0x0f06), DRIVER(1, 0, 0) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
+
+static struct pci_driver dw_pci_driver = {
+	.name		= "dw_dmac_pci",
+	.id_table	= dw_pci_id_table,
+	.probe		= dw_pci_probe,
+	.remove		= __devexit_p(dw_pci_remove),
+};
+
+module_pci_driver(dw_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare DMAC PCI driver");
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/dw/dw_dmac_regs.h b/drivers/dma/dw/dw_dmac_regs.h
new file mode 100644
index 0000000..ff39fa6
--- /dev/null
+++ b/drivers/dma/dw/dw_dmac_regs.h
@@ -0,0 +1,294 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dw_dmac.h>
+
+#define DW_DMA_MAX_NR_CHANNELS	8
+
+/* flow controller */
+enum dw_dma_fc {
+	DW_DMA_FC_D_M2M,
+	DW_DMA_FC_D_M2P,
+	DW_DMA_FC_D_P2M,
+	DW_DMA_FC_D_P2P,
+	DW_DMA_FC_P_P2M,
+	DW_DMA_FC_SP_P2P,
+	DW_DMA_FC_P_M2P,
+	DW_DMA_FC_DP_P2P,
+};
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define DW_REG(name)		u32 name; u32 __pad_##name
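+/*
+ * e.g. DW_REG(SAR) expands to "u32 SAR; u32 __pad_SAR", keeping each
+ * 32-bit register on the 64-bit stride the hardware uses.
+ */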
+
+/* Hardware register definitions. */
+struct dw_dma_chan_regs {
+	DW_REG(SAR);		/* Source Address Register */
+	DW_REG(DAR);		/* Destination Address Register */
+	DW_REG(LLP);		/* Linked List Pointer */
+	u32	CTL_LO;		/* Control Register Low */
+	u32	CTL_HI;		/* Control Register High */
+	DW_REG(SSTAT);
+	DW_REG(DSTAT);
+	DW_REG(SSTATAR);
+	DW_REG(DSTATAR);
+	u32	CFG_LO;		/* Configuration Register Low */
+	u32	CFG_HI;		/* Configuration Register High */
+	DW_REG(SGR);
+	DW_REG(DSR);
+};
+
+struct dw_dma_irq_regs {
+	DW_REG(XFER);
+	DW_REG(BLOCK);
+	DW_REG(SRC_TRAN);
+	DW_REG(DST_TRAN);
+	DW_REG(ERROR);
+};
+
+struct dw_dma_regs {
+	/* per-channel registers */
+	struct dw_dma_chan_regs	CHAN[DW_DMA_MAX_NR_CHANNELS];
+
+	/* irq handling */
+	struct dw_dma_irq_regs	RAW;		/* r */
+	struct dw_dma_irq_regs	STATUS;		/* r (raw & mask) */
+	struct dw_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
+	struct dw_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
+
+	DW_REG(STATUS_INT);			/* r */
+
+	/* software handshaking */
+	DW_REG(REQ_SRC);
+	DW_REG(REQ_DST);
+	DW_REG(SGL_REQ_SRC);
+	DW_REG(SGL_REQ_DST);
+	DW_REG(LAST_SRC);
+	DW_REG(LAST_DST);
+
+	/* miscellaneous */
+	DW_REG(CFG);
+	DW_REG(CH_EN);
+	DW_REG(ID);
+	DW_REG(TEST);
+
+	/* reserved */
+	DW_REG(__reserved0);
+	DW_REG(__reserved1);
+
+	/* optional encoded params, 0x3c8..0x3f7 */
+	u32	__reserved;
+
+	/* per-channel configuration registers */
+	u32	DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
+	u32	MULTI_BLK_TYPE;
+	u32	MAX_BLK_SIZE;
+
+	/* top-level parameters */
+	u32	DW_PARAMS;
+};
+
+/* To access the registers in the early stage of probe */
+#define dma_read_byaddr(addr, name) \
+	readl((addr) + offsetof(struct dw_dma_regs, name))
+
+/* Bitfields in DW_PARAMS */
+#define DW_PARAMS_NR_CHAN	8		/* number of channels */
+#define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
+#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
+#define DW_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
+#define DW_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
+#define DW_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
+#define DW_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
+#define DW_PARAMS_EN		28		/* encoded parameters */
+
+/* Bitfields in DWC_PARAMS */
+#define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
+
+/* Bitfields in CTL_LO */
+#define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
+#define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
+#define DWC_CTLL_SRC_WIDTH(n)	((n)<<4)
+#define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
+#define DWC_CTLL_DST_DEC	(1<<7)
+#define DWC_CTLL_DST_FIX	(2<<7)
+#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
+#define DWC_CTLL_SRC_DEC	(1<<9)
+#define DWC_CTLL_SRC_FIX	(2<<9)
+#define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
+#define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
+#define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
+#define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
+#define DWC_CTLL_FC(n)		((n) << 20)
+#define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
+#define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+#define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+#define DWC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define DWC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
+#define DWC_CTLL_SMS(n)		((n)<<25)	/* src master select */
+#define DWC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
+#define DWC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
+
+/* Bitfields in CTL_HI */
+#define DWC_CTLH_DONE		0x00001000
+#define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
+
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
+#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
+#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
+#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
+#define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
+#define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
+#define DWC_CFGL_MAX_BURST(x)	((x) << 20)
+#define DWC_CFGL_RELOAD_SAR	(1 << 30)
+#define DWC_CFGL_RELOAD_DAR	(1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGH_DS_UPD_EN	(1 << 5)
+#define DWC_CFGH_SS_UPD_EN	(1 << 6)
+
+/* Bitfields in SGR */
+#define DWC_SGR_SGI(x)		((x) << 0)
+#define DWC_SGR_SGC(x)		((x) << 20)
+
+/* Bitfields in DSR */
+#define DWC_DSR_DSI(x)		((x) << 0)
+#define DWC_DSR_DSC(x)		((x) << 20)
+
+/* Bitfields in CFG */
+#define DW_CFG_DMA_EN		(1 << 0)
+
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+	DW_DMA_IS_SOFT_LLP = 1,
+};
+
+struct dw_dma_chan {
+	struct dma_chan		chan;
+	void __iomem		*ch_regs;
+	u8			mask;
+	u8			priority;
+	bool			paused;
+	bool			initialized;
+
+	/* software emulation of the LLP transfers */
+	struct list_head	*tx_list;
+	struct list_head	*tx_node_active;
+
+	spinlock_t		lock;
+
+	/* these other elements are all protected by lock */
+	unsigned long		flags;
+	struct list_head	active_list;
+	struct list_head	queue;
+	struct list_head	free_list;
+	struct dw_cyclic_desc	*cdesc;
+
+	unsigned int		descs_allocated;
+
+	/* hardware configuration */
+	unsigned int		block_size;
+	bool			nollp;
+
+	/* configuration passed via DMA_SLAVE_CONFIG */
+	struct dma_slave_config dma_sconfig;
+
+	/* backlink to dw_dma */
+	struct dw_dma		*dw;
+};
+
+static inline struct dw_dma_chan_regs __iomem *
+__dwc_regs(struct dw_dma_chan *dwc)
+{
+	return dwc->ch_regs;
+}
+
+#define channel_readl(dwc, name) \
+	readl(&(__dwc_regs(dwc)->name))
+#define channel_writel(dwc, name, val) \
+	writel((val), &(__dwc_regs(dwc)->name))
+
+static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct dw_dma_chan, chan);
+}
+
+struct dw_dma {
+	struct dma_device	dma;
+	void __iomem		*regs;
+	struct tasklet_struct	tasklet;
+	struct clk		*clk;
+
+	u8			all_chan_mask;
+
+	/* hardware configuration */
+	unsigned char		nr_masters;
+	unsigned char		data_width[4];
+
+	struct dw_dma_chan	chan[0];
+};
+
+static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+{
+	return dw->regs;
+}
+
+#define dma_readl(dw, name) \
+	readl(&(__dw_regs(dw)->name))
+#define dma_writel(dw, name, val) \
+	writel((val), &(__dw_regs(dw)->name))
+
+#define channel_set_bit(dw, reg, mask) \
+	dma_writel(dw, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(dw, reg, mask) \
+	dma_writel(dw, reg, ((mask) << 8) | 0)
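+
+/*
+ * The upper byte of these registers is a write-enable mask: a channel
+ * bit is only modified when its mirror in bits 15:8 is also written.
+ * E.g. for channel 0 (mask 0x01), channel_set_bit() writes 0x0101 to
+ * set the bit and channel_clear_bit() writes 0x0100 to clear it,
+ * leaving the other channels untouched.
+ */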
+
+static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+{
+	return container_of(ddev, struct dw_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct dw_lli {
+	/* values that are not changed by hardware */
+	u32		sar;
+	u32		dar;
+	u32		llp;		/* chain to next lli */
+	u32		ctllo;
+	/* values that may get written back: */
+	u32		ctlhi;
+	/* sstat and dstat can snapshot peripheral register state.
+	 * silicon config may discard either or both...
+	 */
+	u32		sstat;
+	u32		dstat;
+};
+
+struct dw_desc {
+	/* FIRST values the hardware uses */
+	struct dw_lli			lli;
+
+	/* THEN values for driver housekeeping */
+	struct list_head		desc_node;
+	struct list_head		tx_list;
+	struct dma_async_tx_descriptor	txd;
+	size_t				len;
+};
+
+static inline struct dw_desc *
+txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+{
+	return container_of(txd, struct dw_desc, txd);
+}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
deleted file mode 100644
index 9f0129d..0000000
--- a/drivers/dma/dw_dmac.c
+++ /dev/null
@@ -1,1720 +0,0 @@
-/*
- * Core driver for the Synopsys DesignWare DMA Controller
- *
- * Copyright (C) 2007-2008 Atmel Corporation
- * Copyright (C) 2010-2011 ST Microelectronics
- * Copyright (C) 2012 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "dw_dmac_regs.h"
-#include "dmaengine.h"
-
-/*
- * This supports the Synopsys "DesignWare AHB Central DMA Controller",
- * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
- * of which use ARM any more).  See the "Databook" from Synopsys for
- * information beyond what licensees probably provide.
- *
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
- */
-
-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
-{
-	return slave ? slave->dst_master : 0;
-}
-
-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
-{
-	return slave ? slave->src_master : 1;
-}
-
-#define DWC_DEFAULT_CTLLO(_chan) ({				\
-		struct dw_dma_slave *__slave = (_chan->private);	\
-		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
-		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
-		int _dms = dwc_get_dms(__slave);		\
-		int _sms = dwc_get_sms(__slave);		\
-		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
-			DW_DMA_MSIZE_16;			\
-		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
-			DW_DMA_MSIZE_16;			\
-								\
-		(DWC_CTLL_DST_MSIZE(_dmsize)			\
-		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
-		 | DWC_CTLL_LLP_D_EN				\
-		 | DWC_CTLL_LLP_S_EN				\
-		 | DWC_CTLL_DMS(_dms)				\
-		 | DWC_CTLL_SMS(_sms));				\
-	})
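-
-/*
- * DWC_DEFAULT_CTLLO() builds the CTL_LO template shared by the prep
- * routines: burst sizes from the slave config (or DW_DMA_MSIZE_16 for
- * plain memory transfers), LLP chaining on both ends and the
- * source/destination master selects.  Each caller ORs in its own
- * transfer widths, address-update and flow-control bits.
- */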
-
-/*
- * Number of descriptors to allocate for each channel. This should be
- * made configurable somehow; preferably, the clients (at least the
- * ones using slave transfers) should be able to give us a hint.
- */
-#define NR_DESCS_PER_CHANNEL	64
-
-/*----------------------------------------------------------------------*/
-
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool.  These
- * descriptors -- and associated data -- are cacheable.  We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
-
-static struct device *chan2dev(struct dma_chan *chan)
-{
-	return &chan->dev->device;
-}
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
-
-static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
-{
-	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
-}
-
-static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
-{
-	struct dw_desc *desc, *_desc;
-	struct dw_desc *ret = NULL;
-	unsigned int i = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
-		i++;
-		if (async_tx_test_ack(&desc->txd)) {
-			list_del(&desc->desc_node);
-			ret = desc;
-			break;
-		}
-		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
-	}
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
-
-	return ret;
-}
-
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	struct dw_desc	*child;
-
-	list_for_each_entry(child, &desc->tx_list, desc_node)
-		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-				child->txd.phys, sizeof(child->lli),
-				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-			desc->txd.phys, sizeof(desc->lli),
-			DMA_TO_DEVICE);
-}
-
-/*
- * Move a descriptor, including any children, to the free list.
- * `desc' must not be on any lists.
- */
-static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	unsigned long flags;
-
-	if (desc) {
-		struct dw_desc *child;
-
-		dwc_sync_desc_for_cpu(dwc, desc);
-
-		spin_lock_irqsave(&dwc->lock, flags);
-		list_for_each_entry(child, &desc->tx_list, desc_node)
-			dev_vdbg(chan2dev(&dwc->chan),
-					"moving child desc %p to freelist\n",
-					child);
-		list_splice_init(&desc->tx_list, &dwc->free_list);
-		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
-		list_add(&desc->desc_node, &dwc->free_list);
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	}
-}
-
-static void dwc_initialize(struct dw_dma_chan *dwc)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
-	u32 cfghi = DWC_CFGH_FIFO_MODE;
-	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
-
-	if (dwc->initialized == true)
-		return;
-
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi = dws->cfg_hi;
-		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
-	} else {
-		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
-			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
-		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
-			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
-	}
-
-	channel_writel(dwc, CFG_LO, cfglo);
-	channel_writel(dwc, CFG_HI, cfghi);
-
-	/* Enable interrupts */
-	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
-	dwc->initialized = true;
-}
-
-/*----------------------------------------------------------------------*/
-
-static inline unsigned int dwc_fast_fls(unsigned long long v)
-{
-	/*
-	 * We can be a lot more clever here, but this should take care
-	 * of the most common optimization.
-	 */
-	if (!(v & 7))
-		return 3;
-	else if (!(v & 3))
-		return 2;
-	else if (!(v & 1))
-		return 1;
-	return 0;
-}
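-
-/*
- * Despite its name, dwc_fast_fls() returns the number of trailing
- * zero bits in v, capped at 3 -- i.e. the widest transfer width (as a
- * log2 byte count) that v is aligned to.  E.g. for src | dest | len
- * == 0x1008 it returns 3, permitting 64-bit transfers.
- */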
-
-static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
-{
-	dev_err(chan2dev(&dwc->chan),
-		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
-		channel_readl(dwc, SAR),
-		channel_readl(dwc, DAR),
-		channel_readl(dwc, LLP),
-		channel_readl(dwc, CTL_HI),
-		channel_readl(dwc, CTL_LO));
-}
-
-static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
-{
-	channel_clear_bit(dw, CH_EN, dwc->mask);
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
-}
-
-/*----------------------------------------------------------------------*/
-
-/* Perform single block transfer */
-static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
-				       struct dw_desc *desc)
-{
-	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
-	u32		ctllo;
-
-	/* Software emulation of LLP mode relies on interrupts to continue
-	 * multi block transfer. */
-	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
-
-	channel_writel(dwc, SAR, desc->lli.sar);
-	channel_writel(dwc, DAR, desc->lli.dar);
-	channel_writel(dwc, CTL_LO, ctllo);
-	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
-	channel_set_bit(dw, CH_EN, dwc->mask);
-}
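-
-/*
- * Soft-LLP flow: when the channel cannot chain blocks in hardware
- * (dwc->nollp), dwc_dostart() programs only the first block via this
- * helper; every XFER interrupt then advances dwc->tx_node_active in
- * dw_dma_tasklet() and re-issues dwc_do_single_block() for the next
- * block until the descriptor list is exhausted.
- */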
-
-/* Called with dwc->lock held and bh disabled */
-static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
-{
-	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
-	unsigned long	was_soft_llp;
-
-	/* ASSERT:  channel is idle */
-	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(chan2dev(&dwc->chan),
-			"BUG: Attempted to start non-idle channel\n");
-		dwc_dump_chan_regs(dwc);
-
-		/* The tasklet will hopefully advance the queue... */
-		return;
-	}
-
-	if (dwc->nollp) {
-		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
-						&dwc->flags);
-		if (was_soft_llp) {
-			dev_err(chan2dev(&dwc->chan),
-				"BUG: Attempted to start new LLP transfer "
-				"inside ongoing one\n");
-			return;
-		}
-
-		dwc_initialize(dwc);
-
-		dwc->tx_list = &first->tx_list;
-		dwc->tx_node_active = first->tx_list.next;
-
-		dwc_do_single_block(dwc, first);
-
-		return;
-	}
-
-	dwc_initialize(dwc);
-
-	channel_writel(dwc, LLP, first->txd.phys);
-	channel_writel(dwc, CTL_LO,
-			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
-	channel_writel(dwc, CTL_HI, 0);
-	channel_set_bit(dw, CH_EN, dwc->mask);
-}
-
-/*----------------------------------------------------------------------*/
-
-static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
-		bool callback_required)
-{
-	dma_async_tx_callback		callback = NULL;
-	void				*param = NULL;
-	struct dma_async_tx_descriptor	*txd = &desc->txd;
-	struct dw_desc			*child;
-	unsigned long			flags;
-
-	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	dma_cookie_complete(txd);
-	if (callback_required) {
-		callback = txd->callback;
-		param = txd->callback_param;
-	}
-
-	dwc_sync_desc_for_cpu(dwc, desc);
-
-	/* async_tx_ack */
-	list_for_each_entry(child, &desc->tx_list, desc_node)
-		async_tx_ack(&child->txd);
-	async_tx_ack(&desc->txd);
-
-	list_splice_init(&desc->tx_list, &dwc->free_list);
-	list_move(&desc->desc_node, &dwc->free_list);
-
-	if (!dwc->chan.private) {
-		struct device *parent = chan2parent(&dwc->chan);
-		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-				dma_unmap_single(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
-			else
-				dma_unmap_page(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
-		}
-		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-				dma_unmap_single(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
-			else
-				dma_unmap_page(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
-		}
-	}
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	if (callback_required && callback)
-		callback(param);
-}
-
-static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
-{
-	struct dw_desc *desc, *_desc;
-	LIST_HEAD(list);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(chan2dev(&dwc->chan),
-			"BUG: XFER bit set, but channel not idle!\n");
-
-		/* Try to continue after resetting the channel... */
-		dwc_chan_disable(dw, dwc);
-	}
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	list_splice_init(&dwc->active_list, &list);
-	if (!list_empty(&dwc->queue)) {
-		list_move(dwc->queue.next, &dwc->active_list);
-		dwc_dostart(dwc, dwc_first_active(dwc));
-	}
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc, true);
-}
-
-static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
-{
-	dma_addr_t llp;
-	struct dw_desc *desc, *_desc;
-	struct dw_desc *child;
-	u32 status_xfer;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	llp = channel_readl(dwc, LLP);
-	status_xfer = dma_readl(dw, RAW.XFER);
-
-	if (status_xfer & dwc->mask) {
-		/* Everything we've submitted is done */
-		dma_writel(dw, CLEAR.XFER, dwc->mask);
-		spin_unlock_irqrestore(&dwc->lock, flags);
-
-		dwc_complete_all(dw, dwc);
-		return;
-	}
-
-	if (list_empty(&dwc->active_list)) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		return;
-	}
-
-	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-			(unsigned long long)llp);
-
-	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
-		/* check first descriptor's address */
-		if (desc->txd.phys == llp) {
-			spin_unlock_irqrestore(&dwc->lock, flags);
-			return;
-		}
-
-		/* check first descriptor's llp */
-		if (desc->lli.llp == llp) {
-			/* This one is currently in progress */
-			spin_unlock_irqrestore(&dwc->lock, flags);
-			return;
-		}
-
-		list_for_each_entry(child, &desc->tx_list, desc_node)
-			if (child->lli.llp == llp) {
-				/* Currently in progress */
-				spin_unlock_irqrestore(&dwc->lock, flags);
-				return;
-			}
-
-		/*
-		 * No descriptors so far seem to be in progress, i.e.
-		 * this one must be done.
-		 */
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dwc_descriptor_complete(dwc, desc, true);
-		spin_lock_irqsave(&dwc->lock, flags);
-	}
-
-	dev_err(chan2dev(&dwc->chan),
-		"BUG: All descriptors done, but channel not idle!\n");
-
-	/* Try to continue after resetting the channel... */
-	dwc_chan_disable(dw, dwc);
-
-	if (!list_empty(&dwc->queue)) {
-		list_move(dwc->queue.next, &dwc->active_list);
-		dwc_dostart(dwc, dwc_first_active(dwc));
-	}
-	spin_unlock_irqrestore(&dwc->lock, flags);
-}
-
-static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
-{
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
-}
-
-static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
-{
-	struct dw_desc *bad_desc;
-	struct dw_desc *child;
-	unsigned long flags;
-
-	dwc_scan_descriptors(dw, dwc);
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	/*
-	 * The descriptor currently at the head of the active list is
-	 * borked. Since we don't have any way to report errors, we'll
-	 * just have to scream loudly and try to carry on.
-	 */
-	bad_desc = dwc_first_active(dwc);
-	list_del_init(&bad_desc->desc_node);
-	list_move(dwc->queue.next, dwc->active_list.prev);
-
-	/* Clear the error flag and try to restart the controller */
-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
-	if (!list_empty(&dwc->active_list))
-		dwc_dostart(dwc, dwc_first_active(dwc));
-
-	/*
-	 * KERN_CRIT may seem harsh, but since this only happens
-	 * when someone submits a bad physical address in a
-	 * descriptor, we should consider ourselves lucky that the
-	 * controller flagged an error instead of scribbling over
-	 * random memory locations.
-	 */
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  cookie: %d\n", bad_desc->txd.cookie);
-	dwc_dump_lli(dwc, &bad_desc->lli);
-	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
-		dwc_dump_lli(dwc, &child->lli);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	/* Pretend the descriptor completed successfully */
-	dwc_descriptor_complete(dwc, bad_desc, true);
-}
-
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_err, u32 status_xfer)
-{
-	unsigned long flags;
-
-	if (dwc->mask) {
-		void (*callback)(void *param);
-		void *callback_param;
-
-		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
-				channel_readl(dwc, LLP));
-
-		callback = dwc->cdesc->period_callback;
-		callback_param = dwc->cdesc->period_callback_param;
-
-		if (callback)
-			callback(callback_param);
-	}
-
-	/*
-	 * Error and transfer complete are highly unlikely, and will most
-	 * likely be due to a configuration error by the user.
-	 */
-	if (unlikely(status_err & dwc->mask) ||
-			unlikely(status_xfer & dwc->mask)) {
-		int i;
-
-		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-				"interrupt, stopping DMA transfer\n",
-				status_xfer ? "xfer" : "error");
-
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		dwc_dump_chan_regs(dwc);
-
-		dwc_chan_disable(dw, dwc);
-
-		/* make sure DMA does not restart by loading a new list */
-		channel_writel(dwc, LLP, 0);
-		channel_writel(dwc, CTL_LO, 0);
-		channel_writel(dwc, CTL_HI, 0);
-
-		dma_writel(dw, CLEAR.ERROR, dwc->mask);
-		dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-		for (i = 0; i < dwc->cdesc->periods; i++)
-			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	}
-}
-
-/* ------------------------------------------------------------------------- */
-
-static void dw_dma_tasklet(unsigned long data)
-{
-	struct dw_dma *dw = (struct dw_dma *)data;
-	struct dw_dma_chan *dwc;
-	u32 status_xfer;
-	u32 status_err;
-	int i;
-
-	status_xfer = dma_readl(dw, RAW.XFER);
-	status_err = dma_readl(dw, RAW.ERROR);
-
-	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
-
-	for (i = 0; i < dw->dma.chancnt; i++) {
-		dwc = &dw->chan[i];
-		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
-		else if (status_err & (1 << i))
-			dwc_handle_error(dw, dwc);
-		else if (status_xfer & (1 << i)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&dwc->lock, flags);
-			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
-				if (dwc->tx_node_active != dwc->tx_list) {
-					struct dw_desc *desc =
-						list_entry(dwc->tx_node_active,
-							   struct dw_desc,
-							   desc_node);
-
-					dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-					/* move pointer to next descriptor */
-					dwc->tx_node_active =
-						dwc->tx_node_active->next;
-
-					dwc_do_single_block(dwc, desc);
-
-					spin_unlock_irqrestore(&dwc->lock, flags);
-					continue;
-				} else {
-					/* we are done here */
-					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
-				}
-			}
-			spin_unlock_irqrestore(&dwc->lock, flags);
-
-			dwc_scan_descriptors(dw, dwc);
-		}
-	}
-
-	/*
-	 * Re-enable interrupts.
-	 */
-	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
-}
-
-static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
-{
-	struct dw_dma *dw = dev_id;
-	u32 status;
-
-	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
-			dma_readl(dw, STATUS_INT));
-
-	/*
-	 * Just disable the interrupts. We'll turn them back on in the
-	 * softirq handler.
-	 */
-	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
-	status = dma_readl(dw, STATUS_INT);
-	if (status) {
-		dev_err(dw->dma.dev,
-			"BUG: Unexpected interrupts pending: 0x%x\n",
-			status);
-
-		/* Try to recover */
-		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
-		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
-		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
-		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
-	}
-
-	tasklet_schedule(&dw->tasklet);
-
-	return IRQ_HANDLED;
-}
-
-/*----------------------------------------------------------------------*/
-
-static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dw_desc		*desc = txd_to_dw_desc(tx);
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
-	dma_cookie_t		cookie;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/*
-	 * REVISIT: We should attempt to chain as many descriptors as
-	 * possible, perhaps even appending to those already submitted
-	 * for DMA. But this is hard to do in a race-free manner.
-	 */
-	if (list_empty(&dwc->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &dwc->active_list);
-		dwc_dostart(dwc, dwc_first_active(dwc));
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
-				desc->txd.cookie);
-
-		list_add_tail(&desc->desc_node, &dwc->queue);
-	}
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	return cookie;
-}
-
-static struct dma_async_tx_descriptor *
-dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-		size_t len, unsigned long flags)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
-	struct dw_desc		*desc;
-	struct dw_desc		*first;
-	struct dw_desc		*prev;
-	size_t			xfer_count;
-	size_t			offset;
-	unsigned int		src_width;
-	unsigned int		dst_width;
-	unsigned int		data_width;
-	u32			ctllo;
-
-	dev_vdbg(chan2dev(chan),
-			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-			(unsigned long long)dest, (unsigned long long)src,
-			len, flags);
-
-	if (unlikely(!len)) {
-		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
-		return NULL;
-	}
-
-	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
-					 dwc->dw->data_width[dwc_get_dms(dws)]);
-
-	src_width = dst_width = min_t(unsigned int, data_width,
-				      dwc_fast_fls(src | dest | len));
-
-	ctllo = DWC_DEFAULT_CTLLO(chan)
-			| DWC_CTLL_DST_WIDTH(dst_width)
-			| DWC_CTLL_SRC_WIDTH(src_width)
-			| DWC_CTLL_DST_INC
-			| DWC_CTLL_SRC_INC
-			| DWC_CTLL_FC_M2M;
-	prev = first = NULL;
-
-	for (offset = 0; offset < len; offset += xfer_count << src_width) {
-		xfer_count = min_t(size_t, (len - offset) >> src_width,
-					   dwc->block_size);
-
-		desc = dwc_desc_get(dwc);
-		if (!desc)
-			goto err_desc_get;
-
-		desc->lli.sar = src + offset;
-		desc->lli.dar = dest + offset;
-		desc->lli.ctllo = ctllo;
-		desc->lli.ctlhi = xfer_count;
-
-		if (!first) {
-			first = desc;
-		} else {
-			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					prev->txd.phys, sizeof(prev->lli),
-					DMA_TO_DEVICE);
-			list_add_tail(&desc->desc_node,
-					&first->tx_list);
-		}
-		prev = desc;
-	}
-
-
-	if (flags & DMA_PREP_INTERRUPT)
-		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
-
-	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
-	first->txd.flags = flags;
-	first->len = len;
-
-	return &first->txd;
-
-err_desc_get:
-	dwc_desc_put(dwc, first);
-	return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
-	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
-	struct dw_desc		*prev;
-	struct dw_desc		*first;
-	u32			ctllo;
-	dma_addr_t		reg;
-	unsigned int		reg_width;
-	unsigned int		mem_width;
-	unsigned int		data_width;
-	unsigned int		i;
-	struct scatterlist	*sg;
-	size_t			total_len = 0;
-
-	dev_vdbg(chan2dev(chan), "%s\n", __func__);
-
-	if (unlikely(!dws || !sg_len))
-		return NULL;
-
-	prev = first = NULL;
-
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		reg_width = __fls(sconfig->dst_addr_width);
-		reg = sconfig->dst_addr;
-		ctllo = (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_DST_WIDTH(reg_width)
-				| DWC_CTLL_DST_FIX
-				| DWC_CTLL_SRC_INC);
-
-		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
-
-		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
-
-		for_each_sg(sgl, sg, sg_len, i) {
-			struct dw_desc	*desc;
-			u32		len, dlen, mem;
-
-			mem = sg_dma_address(sg);
-			len = sg_dma_len(sg);
-
-			mem_width = min_t(unsigned int,
-					  data_width, dwc_fast_fls(mem | len));
-
-slave_sg_todev_fill_desc:
-			desc = dwc_desc_get(dwc);
-			if (!desc) {
-				dev_err(chan2dev(chan),
-					"not enough descriptors available\n");
-				goto err_desc_get;
-			}
-
-			desc->lli.sar = mem;
-			desc->lli.dar = reg;
-			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
-			if ((len >> mem_width) > dwc->block_size) {
-				dlen = dwc->block_size << mem_width;
-				mem += dlen;
-				len -= dlen;
-			} else {
-				dlen = len;
-				len = 0;
-			}
-
-			desc->lli.ctlhi = dlen >> mem_width;
-
-			if (!first) {
-				first = desc;
-			} else {
-				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
-			total_len += dlen;
-
-			if (len)
-				goto slave_sg_todev_fill_desc;
-		}
-		break;
-	case DMA_DEV_TO_MEM:
-		reg_width = __fls(sconfig->src_addr_width);
-		reg = sconfig->src_addr;
-		ctllo = (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_SRC_WIDTH(reg_width)
-				| DWC_CTLL_DST_INC
-				| DWC_CTLL_SRC_FIX);
-
-		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
-
-		data_width = dwc->dw->data_width[dwc_get_dms(dws)];
-
-		for_each_sg(sgl, sg, sg_len, i) {
-			struct dw_desc	*desc;
-			u32		len, dlen, mem;
-
-			mem = sg_dma_address(sg);
-			len = sg_dma_len(sg);
-
-			mem_width = min_t(unsigned int,
-					  data_width, dwc_fast_fls(mem | len));
-
-slave_sg_fromdev_fill_desc:
-			desc = dwc_desc_get(dwc);
-			if (!desc) {
-				dev_err(chan2dev(chan),
-						"not enough descriptors available\n");
-				goto err_desc_get;
-			}
-
-			desc->lli.sar = reg;
-			desc->lli.dar = mem;
-			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
-			if ((len >> reg_width) > dwc->block_size) {
-				dlen = dwc->block_size << reg_width;
-				mem += dlen;
-				len -= dlen;
-			} else {
-				dlen = len;
-				len = 0;
-			}
-			desc->lli.ctlhi = dlen >> reg_width;
-
-			if (!first) {
-				first = desc;
-			} else {
-				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
-			total_len += dlen;
-
-			if (len)
-				goto slave_sg_fromdev_fill_desc;
-		}
-		break;
-	default:
-		return NULL;
-	}
-
-	if (flags & DMA_PREP_INTERRUPT)
-		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
-
-	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
-	first->len = total_len;
-
-	return &first->txd;
-
-err_desc_get:
-	dwc_desc_put(dwc, first);
-	return NULL;
-}
-
-/*
- * Fix sconfig's burst size according to dw_dmac. We need to convert
- * them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- *
- * NOTE: burst size 2 is not supported by the controller.
- *
- * This is done by taking the position of the most significant set
- * bit: fls(n) - 2.
- */
-static inline void convert_burst(u32 *maxburst)
-{
-	if (*maxburst > 1)
-		*maxburst = fls(*maxburst) - 2;
-	else
-		*maxburst = 0;
-}
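-
-/*
- * Worked examples: 1 -> 0 (via the else branch), 4 -> fls(4) - 2 = 1,
- * 8 -> fls(8) - 2 = 2 and 16 -> fls(16) - 2 = 3, exactly the msize
- * encodings that DWC_CTLL_{SRC,DST}_MSIZE() expect.
- */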
-
-static int
-set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-
-	/* Check if chan is configured for slave transfers */
-	if (!chan->private)
-		return -EINVAL;
-
-	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
-
-	convert_burst(&dwc->dma_sconfig.src_maxburst);
-	convert_burst(&dwc->dma_sconfig.dst_maxburst);
-
-	return 0;
-}
-
-static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		       unsigned long arg)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc, *_desc;
-	unsigned long		flags;
-	u32			cfglo;
-	LIST_HEAD(list);
-
-	if (cmd == DMA_PAUSE) {
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
-		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
-			cpu_relax();
-
-		dwc->paused = true;
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_RESUME) {
-		if (!dwc->paused)
-			return 0;
-
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
-		dwc->paused = false;
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	} else if (cmd == DMA_TERMINATE_ALL) {
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
-
-		dwc_chan_disable(dw, dwc);
-
-		dwc->paused = false;
-
-		/* active_list entries will end up before queued entries */
-		list_splice_init(&dwc->queue, &list);
-		list_splice_init(&dwc->active_list, &list);
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-
-		/* Flush all pending and queued descriptors */
-		list_for_each_entry_safe(desc, _desc, &list, desc_node)
-			dwc_descriptor_complete(dwc, desc, false);
-	} else if (cmd == DMA_SLAVE_CONFIG) {
-		return set_runtime_config(chan, (struct dma_slave_config *)arg);
-	} else {
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static enum dma_status
-dwc_tx_status(struct dma_chan *chan,
-	      dma_cookie_t cookie,
-	      struct dma_tx_state *txstate)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	enum dma_status		ret;
-
-	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS) {
-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-
-		ret = dma_cookie_status(chan, cookie, txstate);
-	}
-
-	if (ret != DMA_SUCCESS)
-		dma_set_residue(txstate, dwc_first_active(dwc)->len);
-
-	if (dwc->paused)
-		return DMA_PAUSED;
-
-	return ret;
-}
-
-static void dwc_issue_pending(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-
-	if (!list_empty(&dwc->queue))
-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-}
-
-static int dwc_alloc_chan_resources(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc;
-	int			i;
-	unsigned long		flags;
-
-	dev_vdbg(chan2dev(chan), "%s\n", __func__);
-
-	/* ASSERT:  channel is idle */
-	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
-		return -EIO;
-	}
-
-	dma_cookie_init(chan);
-
-	/*
-	 * NOTE: some controllers may have additional features that we
-	 * need to initialize here, like "scatter-gather" (which
-	 * doesn't mean what you think it means), and status writeback.
-	 */
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	i = dwc->descs_allocated;
-	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-
-		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
-		if (!desc) {
-			dev_info(chan2dev(chan),
-				"only allocated %d descriptors\n", i);
-			spin_lock_irqsave(&dwc->lock, flags);
-			break;
-		}
-
-		INIT_LIST_HEAD(&desc->tx_list);
-		dma_async_tx_descriptor_init(&desc->txd, chan);
-		desc->txd.tx_submit = dwc_tx_submit;
-		desc->txd.flags = DMA_CTRL_ACK;
-		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
-				sizeof(desc->lli), DMA_TO_DEVICE);
-		dwc_desc_put(dwc, desc);
-
-		spin_lock_irqsave(&dwc->lock, flags);
-		i = ++dwc->descs_allocated;
-	}
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
-
-	return i;
-}
-
-static void dwc_free_chan_resources(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc, *_desc;
-	unsigned long		flags;
-	LIST_HEAD(list);
-
-	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
-			dwc->descs_allocated);
-
-	/* ASSERT:  channel is idle */
-	BUG_ON(!list_empty(&dwc->active_list));
-	BUG_ON(!list_empty(&dwc->queue));
-	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	list_splice_init(&dwc->free_list, &list);
-	dwc->descs_allocated = 0;
-	dwc->initialized = false;
-
-	/* Disable interrupts */
-	channel_clear_bit(dw, MASK.XFER, dwc->mask);
-	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
-		dma_unmap_single(chan2parent(chan), desc->txd.phys,
-				sizeof(desc->lli), DMA_TO_DEVICE);
-		kfree(desc);
-	}
-
-	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
-}
-
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
-	unsigned long		flags;
-
-	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
-		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
-		return -ENODEV;
-	}
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	/* assert channel is idle */
-	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(chan2dev(&dwc->chan),
-			"BUG: Attempted to start non-idle channel\n");
-		dwc_dump_chan_regs(dwc);
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		return -EBUSY;
-	}
-
-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
-	dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-	/* setup DMAC channel registers */
-	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
-	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
-	channel_writel(dwc, CTL_HI, 0);
-
-	channel_set_bit(dw, CH_EN, dwc->mask);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
-	unsigned long		flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_transfer_direction direction)
-{
-	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
-	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
-	struct dw_cyclic_desc		*cdesc;
-	struct dw_cyclic_desc		*retval = NULL;
-	struct dw_desc			*desc;
-	struct dw_desc			*last = NULL;
-	unsigned long			was_cyclic;
-	unsigned int			reg_width;
-	unsigned int			periods;
-	unsigned int			i;
-	unsigned long			flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	if (dwc->nollp) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel doesn't support LLP transfers\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"queue and/or active list are not empty\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_irqrestore(&dwc->lock, flags);
-	if (was_cyclic) {
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel already prepared for cyclic DMA\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	retval = ERR_PTR(-EINVAL);
-
-	if (direction == DMA_MEM_TO_DEV)
-		reg_width = __ffs(sconfig->dst_addr_width);
-	else
-		reg_width = __ffs(sconfig->src_addr_width);
-
-	periods = buf_len / period_len;
-
-	/* Check for too big/unaligned periods and unaligned DMA buffer. */
-	if (period_len > (dwc->block_size << reg_width))
-		goto out_err;
-	if (unlikely(period_len & ((1 << reg_width) - 1)))
-		goto out_err;
-	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
-		goto out_err;
-	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
-		goto out_err;
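-
-	/*
-	 * E.g. with a 4-byte peripheral register width,
-	 * reg_width = __ffs(4) = 2: buf_addr and period_len must then
-	 * be 4-byte aligned, and a period may span at most
-	 * dwc->block_size 32-bit items (dwc->block_size << 2 bytes).
-	 */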
-
-	retval = ERR_PTR(-ENOMEM);
-
-	if (periods > NR_DESCS_PER_CHANNEL)
-		goto out_err;
-
-	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
-	if (!cdesc)
-		goto out_err;
-
-	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
-	if (!cdesc->desc)
-		goto out_err_alloc;
-
-	for (i = 0; i < periods; i++) {
-		desc = dwc_desc_get(dwc);
-		if (!desc)
-			goto out_err_desc_get;
-
-		switch (direction) {
-		case DMA_MEM_TO_DEV:
-			desc->lli.dar = sconfig->dst_addr;
-			desc->lli.sar = buf_addr + (period_len * i);
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_FIX
-					| DWC_CTLL_SRC_INC
-					| DWC_CTLL_INT_EN);
-
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
-
-			break;
-		case DMA_DEV_TO_MEM:
-			desc->lli.dar = buf_addr + (period_len * i);
-			desc->lli.sar = sconfig->src_addr;
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_DST_INC
-					| DWC_CTLL_SRC_FIX
-					| DWC_CTLL_INT_EN);
-
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
-
-			break;
-		default:
-			break;
-		}
-
-		desc->lli.ctlhi = (period_len >> reg_width);
-		cdesc->desc[i] = desc;
-
-		if (last) {
-			last->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					last->txd.phys, sizeof(last->lli),
-					DMA_TO_DEVICE);
-		}
-
-		last = desc;
-	}
-
-	/* Let's make a cyclic list */
-	last->lli.llp = cdesc->desc[0]->txd.phys;
-	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
-			sizeof(last->lli), DMA_TO_DEVICE);
-
-	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-			"period %zu periods %d\n", (unsigned long long)buf_addr,
-			buf_len, period_len, periods);
-
-	cdesc->periods = periods;
-	dwc->cdesc = cdesc;
-
-	return cdesc;
-
-out_err_desc_get:
-	while (i--)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
-	kfree(cdesc);
-out_err:
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
-	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
-	int			i;
-	unsigned long		flags;
-
-	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
-	if (!cdesc)
-		return;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
-	dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	for (i = 0; i < cdesc->periods; i++)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-
-	kfree(cdesc->desc);
-	kfree(cdesc);
-
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
-static void dw_dma_off(struct dw_dma *dw)
-{
-	int i;
-
-	dma_writel(dw, CFG, 0);
-
-	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
-	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
-		cpu_relax();
-
-	for (i = 0; i < dw->dma.chancnt; i++)
-		dw->chan[i].initialized = false;
-}
-
-static int __devinit dw_probe(struct platform_device *pdev)
-{
-	struct dw_dma_platform_data *pdata;
-	struct resource		*io;
-	struct dw_dma		*dw;
-	size_t			size;
-	void __iomem		*regs;
-	bool			autocfg;
-	unsigned int		dw_params;
-	unsigned int		nr_channels;
-	unsigned int		max_blk_size = 0;
-	int			irq;
-	int			err;
-	int			i;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
-
-	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!io)
-		return -EINVAL;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		return irq;
-
-	regs = devm_request_and_ioremap(&pdev->dev, io);
-	if (!regs)
-		return -EBUSY;
-
-	dw_params = dma_read_byaddr(regs, DW_PARAMS);
-	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
-
-	if (autocfg)
-		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
-	else
-		nr_channels = pdata->nr_channels;
-
-	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
-	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (!dw)
-		return -ENOMEM;
-
-	dw->clk = devm_clk_get(&pdev->dev, "hclk");
-	if (IS_ERR(dw->clk))
-		return PTR_ERR(dw->clk);
-	clk_prepare_enable(dw->clk);
-
-	dw->regs = regs;
-
-	/* get hardware configuration parameters */
-	if (autocfg) {
-		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
-
-		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
-		for (i = 0; i < dw->nr_masters; i++) {
-			dw->data_width[i] =
-				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
-		}
-	} else {
-		dw->nr_masters = pdata->nr_masters;
-		memcpy(dw->data_width, pdata->data_width, 4);
-	}
-
-	/* Calculate all channel mask before DMA setup */
-	dw->all_chan_mask = (1 << nr_channels) - 1;
-
-	/* force dma off, just in case */
-	dw_dma_off(dw);
-
-	/* disable BLOCK interrupts as well */
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
-	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
-			       "dw_dmac", dw);
-	if (err)
-		return err;
-
-	platform_set_drvdata(pdev, dw);
-
-	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
-
-	INIT_LIST_HEAD(&dw->dma.channels);
-	for (i = 0; i < nr_channels; i++) {
-		struct dw_dma_chan	*dwc = &dw->chan[i];
-		int			r = nr_channels - i - 1;
-
-		dwc->chan.device = &dw->dma;
-		dma_cookie_init(&dwc->chan);
-		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
-			list_add_tail(&dwc->chan.device_node,
-					&dw->dma.channels);
-		else
-			list_add(&dwc->chan.device_node, &dw->dma.channels);
-
-		/* 7 is highest priority & 0 is lowest. */
-		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = r;
-		else
-			dwc->priority = i;
-
-		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
-		spin_lock_init(&dwc->lock);
-		dwc->mask = 1 << i;
-
-		INIT_LIST_HEAD(&dwc->active_list);
-		INIT_LIST_HEAD(&dwc->queue);
-		INIT_LIST_HEAD(&dwc->free_list);
-
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-
-		dwc->dw = dw;
-
-		/* hardware configuration */
-		if (autocfg) {
-			unsigned int dwc_params;
-
-			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
-						     DWC_PARAMS);
-
-			/* Decode maximum block size for given channel. The
-			 * stored 4 bit value represents blocks from 0x00 for 3
-			 * up to 0x0a for 4095. */
-			dwc->block_size =
-				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
-			dwc->nollp =
-				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
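-
-			/*
-			 * E.g. an encoded value of 0 yields
-			 * (4 << 0) - 1 = 3, and the maximum 0x0a yields
-			 * (4 << 10) - 1 = 4095, matching the range
-			 * quoted above.
-			 */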
-		} else {
-			dwc->block_size = pdata->block_size;
-
-			/* Check if channel supports multi block transfer */
-			channel_writel(dwc, LLP, 0xfffffffc);
-			dwc->nollp =
-				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
-			channel_writel(dwc, LLP, 0);
-		}
-	}
-
-	/* Clear all interrupts on all channels. */
-	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
-
-	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
-	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
-	if (pdata->is_private)
-		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
-	dw->dma.dev = &pdev->dev;
-	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
-	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
-
-	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
-
-	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_control = dwc_control;
-
-	dw->dma.device_tx_status = dwc_tx_status;
-	dw->dma.device_issue_pending = dwc_issue_pending;
-
-	dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
-	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), nr_channels);
-
-	dma_async_device_register(&dw->dma);
-
-	return 0;
-}
-
-static int __devexit dw_remove(struct platform_device *pdev)
-{
-	struct dw_dma		*dw = platform_get_drvdata(pdev);
-	struct dw_dma_chan	*dwc, *_dwc;
-
-	dw_dma_off(dw);
-	dma_async_device_unregister(&dw->dma);
-
-	tasklet_kill(&dw->tasklet);
-
-	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
-			chan.device_node) {
-		list_del(&dwc->chan.device_node);
-		channel_clear_bit(dw, CH_EN, dwc->mask);
-	}
-
-	return 0;
-}
-
-static void dw_shutdown(struct platform_device *pdev)
-{
-	struct dw_dma	*dw = platform_get_drvdata(pdev);
-
-	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable_unprepare(dw->clk);
-}
-
-static int dw_suspend_noirq(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct dw_dma	*dw = platform_get_drvdata(pdev);
-
-	dw_dma_off(platform_get_drvdata(pdev));
-	clk_disable_unprepare(dw->clk);
-
-	return 0;
-}
-
-static int dw_resume_noirq(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct dw_dma	*dw = platform_get_drvdata(pdev);
-
-	clk_prepare_enable(dw->clk);
-	dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
-	return 0;
-}
-
-static const struct dev_pm_ops dw_dev_pm_ops = {
-	.suspend_noirq = dw_suspend_noirq,
-	.resume_noirq = dw_resume_noirq,
-	.freeze_noirq = dw_suspend_noirq,
-	.thaw_noirq = dw_resume_noirq,
-	.restore_noirq = dw_resume_noirq,
-	.poweroff_noirq = dw_suspend_noirq,
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id dw_dma_id_table[] = {
-	{ .compatible = "snps,dma-spear1340" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, dw_dma_id_table);
-#endif
-
-static struct platform_driver dw_driver = {
-	.probe		= dw_probe,
-	.remove		= __devexit_p(dw_remove),
-	.shutdown	= dw_shutdown,
-	.driver = {
-		.name	= "dw_dmac",
-		.pm	= &dw_dev_pm_ops,
-		.of_match_table = of_match_ptr(dw_dma_id_table),
-	},
-};
-
-module_platform_driver(dw_driver);
-
-MODULE_ALIAS("platform:dw_dmac");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
-MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/dw_dmac_pci.c b/drivers/dma/dw_dmac_pci.c
deleted file mode 100644
index 95570df..0000000
--- a/drivers/dma/dw_dmac_pci.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * PCI driver for the Synopsys DesignWare DMA Controller
- *
- * Copyright (C) 2012 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/dw_dmac.h>
-
-#define DRIVER(_is_private, _chan_order, _chan_pri)		\
-	((kernel_ulong_t)&(struct dw_dma_platform_data) {	\
-		.is_private = (_is_private),			\
-		.chan_allocation_order = (_chan_order),		\
-		.chan_priority = (_chan_pri),			\
-	})
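-
-/*
- * E.g. { PCI_VDEVICE(INTEL, 0x0827), DRIVER(1, 0, 0) } in the ID
- * table below hands the probed device platform data with
- * .is_private = 1 and the default (zero) channel allocation order
- * and channel priority policy.
- */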
-
-static int __devinit dw_pci_probe(struct pci_dev *pdev,
-				  const struct pci_device_id *id)
-{
-	struct platform_device *pd;
-	struct resource r[2];
-	struct dw_dma_platform_data *driver = (void *)id->driver_data;
-	static int instance;
-	int ret;
-
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_set_master(pdev);
-	pci_try_set_mwi(pdev);
-
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		goto err0;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		goto err0;
-
-	pd = platform_device_alloc("dw_dmac", instance);
-	if (!pd) {
-		dev_err(&pdev->dev, "can't allocate dw_dmac platform device\n");
-		ret = -ENOMEM;
-		goto err0;
-	}
-
-	memset(r, 0, sizeof(r));
-
-	r[0].start = pci_resource_start(pdev, 0);
-	r[0].end = pci_resource_end(pdev, 0);
-	r[0].flags = IORESOURCE_MEM;
-
-	r[1].start = pdev->irq;
-	r[1].flags = IORESOURCE_IRQ;
-
-	ret = platform_device_add_resources(pd, r, ARRAY_SIZE(r));
-	if (ret) {
-		dev_err(&pdev->dev, "can't add resources to platform device\n");
-		goto err1;
-	}
-
-	ret = platform_device_add_data(pd, driver, sizeof(*driver));
-	if (ret)
-		goto err1;
-
-	dma_set_coherent_mask(&pd->dev, pdev->dev.coherent_dma_mask);
-	pd->dev.dma_mask = pdev->dev.dma_mask;
-	pd->dev.dma_parms = pdev->dev.dma_parms;
-	pd->dev.parent = &pdev->dev;
-
-	pci_set_drvdata(pdev, pd);
-
-	ret = platform_device_add(pd);
-	if (ret) {
-		dev_err(&pdev->dev, "platform_device_add failed\n");
-		goto err1;
-	}
-
-	instance++;
-	return 0;
-
-err1:
-	platform_device_put(pd);
-err0:
-	pci_disable_device(pdev);
-
-	return ret;
-}
-
-static void __devexit dw_pci_remove(struct pci_dev *pdev)
-{
-	struct platform_device *pd = pci_get_drvdata(pdev);
-
-	platform_device_unregister(pd);
-	pci_set_drvdata(pdev, NULL);
-	pci_disable_device(pdev);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
-	{ PCI_VDEVICE(INTEL, 0x0827), DRIVER(1, 0, 0) },
-	{ PCI_VDEVICE(INTEL, 0x0830), DRIVER(1, 0, 0) },
-	{ PCI_VDEVICE(INTEL, 0x0f06), DRIVER(1, 0, 0) },
-	{ 0, }
-};
-MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
-
-static struct pci_driver dw_pci_driver = {
-	.name		= "dw_dmac_pci",
-	.id_table	= dw_pci_id_table,
-	.probe		= dw_pci_probe,
-	.remove		= __devexit_p(dw_pci_remove),
-};
-
-module_pci_driver(dw_pci_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("DesignWare DMAC PCI driver");
-MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
-MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
deleted file mode 100644
index ff39fa6..0000000
--- a/drivers/dma/dw_dmac_regs.h
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Driver for the Synopsys DesignWare AHB DMA Controller
- *
- * Copyright (C) 2005-2007 Atmel Corporation
- * Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/dw_dmac.h>
-
-#define DW_DMA_MAX_NR_CHANNELS	8
-
-/* flow controller */
-enum dw_dma_fc {
-	DW_DMA_FC_D_M2M,
-	DW_DMA_FC_D_M2P,
-	DW_DMA_FC_D_P2M,
-	DW_DMA_FC_D_P2P,
-	DW_DMA_FC_P_P2M,
-	DW_DMA_FC_SP_P2P,
-	DW_DMA_FC_P_M2P,
-	DW_DMA_FC_DP_P2P,
-};
-
-/*
- * Redefine this macro to handle differences between 32- and 64-bit
- * addressing, big vs. little endian, etc.
- */
-#define DW_REG(name)		u32 name; u32 __pad_##name
-
-/* Hardware register definitions. */
-struct dw_dma_chan_regs {
-	DW_REG(SAR);		/* Source Address Register */
-	DW_REG(DAR);		/* Destination Address Register */
-	DW_REG(LLP);		/* Linked List Pointer */
-	u32	CTL_LO;		/* Control Register Low */
-	u32	CTL_HI;		/* Control Register High */
-	DW_REG(SSTAT);
-	DW_REG(DSTAT);
-	DW_REG(SSTATAR);
-	DW_REG(DSTATAR);
-	u32	CFG_LO;		/* Configuration Register Low */
-	u32	CFG_HI;		/* Configuration Register High */
-	DW_REG(SGR);
-	DW_REG(DSR);
-};
-
-struct dw_dma_irq_regs {
-	DW_REG(XFER);
-	DW_REG(BLOCK);
-	DW_REG(SRC_TRAN);
-	DW_REG(DST_TRAN);
-	DW_REG(ERROR);
-};
-
-struct dw_dma_regs {
-	/* per-channel registers */
-	struct dw_dma_chan_regs	CHAN[DW_DMA_MAX_NR_CHANNELS];
-
-	/* irq handling */
-	struct dw_dma_irq_regs	RAW;		/* r */
-	struct dw_dma_irq_regs	STATUS;		/* r (raw & mask) */
-	struct dw_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
-	struct dw_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */
-
-	DW_REG(STATUS_INT);			/* r */
-
-	/* software handshaking */
-	DW_REG(REQ_SRC);
-	DW_REG(REQ_DST);
-	DW_REG(SGL_REQ_SRC);
-	DW_REG(SGL_REQ_DST);
-	DW_REG(LAST_SRC);
-	DW_REG(LAST_DST);
-
-	/* miscellaneous */
-	DW_REG(CFG);
-	DW_REG(CH_EN);
-	DW_REG(ID);
-	DW_REG(TEST);
-
-	/* reserved */
-	DW_REG(__reserved0);
-	DW_REG(__reserved1);
-
-	/* optional encoded params, 0x3c8..0x3f7 */
-	u32	__reserved;
-
-	/* per-channel configuration registers */
-	u32	DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
-	u32	MULTI_BLK_TYPE;
-	u32	MAX_BLK_SIZE;
-
-	/* top-level parameters */
-	u32	DW_PARAMS;
-};
-
-/* To access the registers in early stage of probe */
-#define dma_read_byaddr(addr, name) \
-	readl((addr) + offsetof(struct dw_dma_regs, name))
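-/*
- * Illustrative use, as the core driver does during early probe,
- * before the register block is wrapped in struct dw_dma:
- *
- *	dw_params = dma_read_byaddr(regs, DW_PARAMS);
- */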
-
-/* Bitfields in DW_PARAMS */
-#define DW_PARAMS_NR_CHAN	8		/* number of channels */
-#define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
-#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
-#define DW_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
-#define DW_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
-#define DW_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
-#define DW_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
-#define DW_PARAMS_EN		28		/* encoded parameters */
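-/*
- * A decoding sketch, assuming the field widths from the Synopsys
- * databook (both fields store N - 1):
- *
- *	nr_channels = ((dw_params >> DW_PARAMS_NR_CHAN) & 7) + 1;
- *	nr_masters  = ((dw_params >> DW_PARAMS_NR_MASTER) & 3) + 1;
- */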
-
-/* Bitfields in DWC_PARAMS */
-#define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
-
-/* Bitfields in CTL_LO */
-#define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
-#define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
-#define DWC_CTLL_SRC_WIDTH(n)	((n)<<4)
-#define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
-#define DWC_CTLL_DST_DEC	(1<<7)
-#define DWC_CTLL_DST_FIX	(2<<7)
-#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
-#define DWC_CTLL_SRC_DEC	(1<<9)
-#define DWC_CTLL_SRC_FIX	(2<<9)
-#define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
-#define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
-#define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
-#define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
-#define DWC_CTLL_FC(n)		((n) << 20)
-#define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
-#define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
-#define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
-#define DWC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
-/* plus 4 transfer types for peripheral-as-flow-controller */
-#define DWC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
-#define DWC_CTLL_SMS(n)		((n)<<25)	/* src master select */
-#define DWC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
-#define DWC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
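-/*
- * Illustrative only: a CTL_LO value for a 32-bit memory-to-peripheral
- * transfer (the encoded width n means 2^n bytes per element, so 2
- * selects 32 bits) could be composed as
- *
- *	ctllo = DWC_CTLL_INT_EN
- *		| DWC_CTLL_DST_WIDTH(2) | DWC_CTLL_SRC_WIDTH(2)
- *		| DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC
- *		| DWC_CTLL_FC_M2P;
- */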
-
-/* Bitfields in CTL_HI */
-#define DWC_CTLH_DONE		0x00001000
-#define DWC_CTLH_BLOCK_TS_MASK	0x00000fff
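-/*
- * BLOCK_TS holds the block length in units of the source transfer
- * width; it caps how much data one descriptor can move (cf. the
- * per-channel block_size field below).
- */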
-
-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
-#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
-#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
-#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
-#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */
-#define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
-#define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
-#define DWC_CFGL_MAX_BURST(x)	((x) << 20)
-#define DWC_CFGL_RELOAD_SAR	(1 << 30)
-#define DWC_CFGL_RELOAD_DAR	(1 << 31)
-
-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
-#define DWC_CFGH_DS_UPD_EN	(1 << 5)
-#define DWC_CFGH_SS_UPD_EN	(1 << 6)
-
-/* Bitfields in SGR */
-#define DWC_SGR_SGI(x)		((x) << 0)
-#define DWC_SGR_SGC(x)		((x) << 20)
-
-/* Bitfields in DSR */
-#define DWC_DSR_DSI(x)		((x) << 0)
-#define DWC_DSR_DSC(x)		((x) << 20)
-
-/* Bitfields in CFG */
-#define DW_CFG_DMA_EN		(1 << 0)
-
-enum dw_dmac_flags {
-	DW_DMA_IS_CYCLIC = 0,
-	DW_DMA_IS_SOFT_LLP = 1,
-};
-
-struct dw_dma_chan {
-	struct dma_chan		chan;
-	void __iomem		*ch_regs;
-	u8			mask;
-	u8			priority;
-	bool			paused;
-	bool			initialized;
-
-	/* software emulation of the LLP transfers */
-	struct list_head	*tx_list;
-	struct list_head	*tx_node_active;
-
-	spinlock_t		lock;
-
-	/* these other elements are all protected by lock */
-	unsigned long		flags;
-	struct list_head	active_list;
-	struct list_head	queue;
-	struct list_head	free_list;
-	struct dw_cyclic_desc	*cdesc;
-
-	unsigned int		descs_allocated;
-
-	/* hardware configuration */
-	unsigned int		block_size;
-	bool			nollp;
-
-	/* configuration passed via DMA_SLAVE_CONFIG */
-	struct dma_slave_config dma_sconfig;
-
-	/* backlink to dw_dma */
-	struct dw_dma		*dw;
-};
-
-static inline struct dw_dma_chan_regs __iomem *
-__dwc_regs(struct dw_dma_chan *dwc)
-{
-	return dwc->ch_regs;
-}
-
-#define channel_readl(dwc, name) \
-	readl(&(__dwc_regs(dwc)->name))
-#define channel_writel(dwc, name, val) \
-	writel((val), &(__dwc_regs(dwc)->name))
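-/*
- * For example, dwc_dostart() points the channel at the first
- * hardware descriptor of a chain with
- * channel_writel(dwc, LLP, first->txd.phys).
- */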
-
-static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
-{
-	return container_of(chan, struct dw_dma_chan, chan);
-}
-
-struct dw_dma {
-	struct dma_device	dma;
-	void __iomem		*regs;
-	struct tasklet_struct	tasklet;
-	struct clk		*clk;
-
-	u8			all_chan_mask;
-
-	/* hardware configuration */
-	unsigned char		nr_masters;
-	unsigned char		data_width[4];
-
-	struct dw_dma_chan	chan[0];
-};
-
-static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
-{
-	return dw->regs;
-}
-
-#define dma_readl(dw, name) \
-	readl(&(__dw_regs(dw)->name))
-#define dma_writel(dw, name, val) \
-	writel((val), &(__dw_regs(dw)->name))
-
-#define channel_set_bit(dw, reg, mask) \
-	dma_writel(dw, reg, ((mask) << 8) | (mask))
-#define channel_clear_bit(dw, reg, mask) \
-	dma_writel(dw, reg, ((mask) << 8) | 0)
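-/*
- * CH_EN and the MASK_* registers take a write-enable mask in bits
- * 15:8: only bits whose enable bit is also set get updated, which
- * is why no read-modify-write cycle is needed here.
- */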
-
-static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
-{
-	return container_of(ddev, struct dw_dma, dma);
-}
-
-/* LLI == Linked List Item; a.k.a. DMA block descriptor */
-struct dw_lli {
-	/* values that are not changed by hardware */
-	u32		sar;
-	u32		dar;
-	u32		llp;		/* chain to next lli */
-	u32		ctllo;
-	/* values that may get written back: */
-	u32		ctlhi;
-	/*
-	 * sstat and dstat can snapshot peripheral register state;
-	 * the silicon configuration may omit either or both.
-	 */
-	u32		sstat;
-	u32		dstat;
-};
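-/*
- * While DWC_CTLL_LLP_S_EN/DWC_CTLL_LLP_D_EN are set, the controller
- * fetches the next block descriptor from the physical address in
- * llp by itself; the driver terminates a chain by zeroing llp and
- * clearing those bits in the last descriptor's ctllo.
- */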
-
-struct dw_desc {
-	/* FIRST values the hardware uses */
-	struct dw_lli			lli;
-
-	/* THEN values for driver housekeeping */
-	struct list_head		desc_node;
-	struct list_head		tx_list;
-	struct dma_async_tx_descriptor	txd;
-	size_t				len;
-};
-
-static inline struct dw_desc *
-txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
-{
-	return container_of(txd, struct dw_desc, txd);
-}
-- 
1.7.10.4


Thread overview: 45+ messages
2012-09-25 12:13 [PATCHv1 0/6] dw_dmac: split the driver and introduce PCI part Andy Shevchenko
2012-09-25 12:13 ` [PATCHv1 1/6] dmaengine: dw_dmac: Remove clk API dependency Andy Shevchenko
2012-09-26  3:42   ` viresh kumar
2012-09-26 12:33     ` Andy Shevchenko
2012-09-25 12:13 ` [PATCHv1 2/6] dmaengine: dw_dmac: add driver for Atmel AT32 Andy Shevchenko
2012-09-26  3:50   ` viresh kumar
2012-09-26  6:47     ` Andy Shevchenko
2012-09-26  6:51       ` viresh kumar
2012-09-26  6:56         ` Andy Shevchenko
2012-09-26  3:52   ` viresh kumar
2012-09-25 12:13 ` [PATCHv1 3/6] dmaengine: dw_dmac: Add PCI part of the driver Andy Shevchenko
2012-09-26  4:00   ` viresh kumar
2012-09-26  7:48     ` Andy Shevchenko
2012-09-25 12:13 ` [PATCHv1 4/6] avr32: at32ap700x: rename DMA controller Andy Shevchenko
2012-09-25 12:13 ` [PATCHv1 5/6] MAINTAINERS: fix indentation for Viresh Kumar Andy Shevchenko
2012-09-26  3:36   ` viresh kumar
2012-09-26  7:10     ` Andy Shevchenko
2012-09-25 12:13 ` [PATCHv1 6/6] MAINTAINERS: add recently created files to dw_dmac section Andy Shevchenko
2012-09-25 13:19   ` Joe Perches
2012-09-25 13:37     ` Andy Shevchenko
2012-09-25 15:33       ` Vinod Koul
2012-09-25 16:57       ` Joe Perches
2012-09-26  6:44         ` Andy Shevchenko
2012-09-26  3:39       ` viresh kumar
2012-09-26 12:40 ` [PATCHv2 0/4] dw_dmac: split the driver and introduce PCI part Andy Shevchenko
2012-09-26 12:40   ` [PATCHv2 1/4] dmaengine: dw_dmac: convert to platform driver Andy Shevchenko
2012-09-26 14:13     ` viresh kumar
2012-09-26 18:00       ` Andy Shevchenko
2012-09-27  3:47         ` viresh kumar
2012-09-26 12:40   ` [PATCHv2 2/4] dmaengine: dw_dmac: Add PCI part of the driver Andy Shevchenko
2012-09-26 14:33     ` viresh kumar
2012-09-26 17:55       ` Andy Shevchenko
2012-09-26 19:41         ` Arnd Bergmann
2012-09-27  3:53           ` viresh kumar
2012-09-27  7:41             ` Arnd Bergmann
2012-09-27 14:22               ` Vinod Koul
2012-09-27 14:42                 ` Arnd Bergmann
2012-09-26 12:40   ` Andy Shevchenko [this message]
2012-09-26 14:53     ` [PATCHv2 3/4] dma: move dw_dmac driver to an own directory viresh kumar
2012-09-26 17:50       ` Andy Shevchenko
2012-09-26 12:40   ` [PATCHv2 4/4] MAINTAINERS: add recently created files to dw_dmac section Andy Shevchenko
2012-09-26 14:45     ` viresh kumar
2012-09-26 14:48       ` viresh kumar
2012-09-26 17:49         ` Andy Shevchenko
2012-09-27  6:38       ` Andy Shevchenko
