* [PATCH v7 0/2] add uart DMA function
@ 2018-12-25  1:26 Long Cheng
  2018-12-25  1:26 ` [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support Long Cheng
  2018-12-25  1:26 ` [PATCH v7 2/2] arm: dts: mt2712: add uart APDMA to device tree Long Cheng
  0 siblings, 2 replies; 9+ messages in thread
From: Long Cheng @ 2018-12-25  1:26 UTC (permalink / raw)
  To: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang
  Cc: Matthias Brugger, Dan Williams, Greg Kroah-Hartman, Jiri Slaby,
	Sean Wang, dmaengine, devicetree, linux-arm-kernel,
	linux-mediatek, linux-kernel, linux-serial, srv_heupstream,
	Yingjoe Chen, YT Shen, Long Cheng

In MediaTek SoCs, the UART can support DMA transfers.
Based on the DMA engine framework, we add the DMA code to support the UART, and put the code under drivers/dma.

This series contains the binding document, a Kconfig option to control whether the function
is enabled, the device tree nodes (interrupt and DMA device node), and the UART DMA driver code.

Changes compared to v6:
-Correct spelling
Changes compared to v5:
-move 'request irqs' to alloc channel
-remove tasklet.
Changes compared to v4:
-modify Kconfig dependencies.
Changes compared to v3:
-fix CONFIG_PM build failure
Changes compared to v2:
-remove unimportant parameters
-use the virtual channel APIs instead of the cookie.
-use of_dma_xlate_by_chan_id.
Changes compared to v1:
-main revised file, 8250_mtk_dma.c
--rename parameters to standard names
--remove atomic operations


Long Cheng (2):
  dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  arm: dts: mt2712: add uart APDMA to device tree

 arch/arm64/boot/dts/mediatek/mt2712e.dtsi |   50 +++
 drivers/dma/mediatek/8250_mtk_dma.c       |  694 +++++++++++++++++++++++++++++
 drivers/dma/mediatek/Kconfig              |   11 +
 drivers/dma/mediatek/Makefile             |    1 +
 4 files changed, 756 insertions(+)
 create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c

-- 
1.7.9.5



* [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-25  1:26 [PATCH v7 0/2] add uart DMA function Long Cheng
@ 2018-12-25  1:26 ` Long Cheng
  2018-12-25  7:16   ` Nicolas Boichat
  2018-12-25  1:26 ` [PATCH v7 2/2] arm: dts: mt2712: add uart APDMA to device tree Long Cheng
  1 sibling, 1 reply; 9+ messages in thread
From: Long Cheng @ 2018-12-25  1:26 UTC (permalink / raw)
  To: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang
  Cc: Matthias Brugger, Dan Williams, Greg Kroah-Hartman, Jiri Slaby,
	Sean Wang, dmaengine, devicetree, linux-arm-kernel,
	linux-mediatek, linux-kernel, linux-serial, srv_heupstream,
	Yingjoe Chen, YT Shen, Long Cheng

In the DMA engine framework, add 8250 UART DMA support for the MediaTek UART.
If the MediaTek UART is enabled (SERIAL_8250_MT6577) and you want to improve
its performance, you can enable this function.

Signed-off-by: Long Cheng <long.cheng@mediatek.com>
---
 drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
 drivers/dma/mediatek/Kconfig        |   11 +
 drivers/dma/mediatek/Makefile       |    1 +
 3 files changed, 706 insertions(+)
 create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c

diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
new file mode 100644
index 0000000..c4090f2
--- /dev/null
+++ b/drivers/dma/mediatek/8250_mtk_dma.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek 8250 DMA driver.
+ *
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Long Cheng <long.cheng@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+
+#include "../virt-dma.h"
+
+#define MTK_UART_APDMA_CHANNELS		(CONFIG_SERIAL_8250_NR_UARTS * 2)
+
+#define VFF_EN_B		BIT(0)
+#define VFF_STOP_B		BIT(0)
+#define VFF_FLUSH_B		BIT(0)
+#define VFF_4G_SUPPORT_B	BIT(0)
+#define VFF_RX_INT_EN0_B	BIT(0)	/* rx valid size >= vff thre */
+#define VFF_RX_INT_EN1_B	BIT(1)
+#define VFF_TX_INT_EN_B		BIT(0)	/* tx left size >= vff thre */
+#define VFF_WARM_RST_B		BIT(0)
+#define VFF_RX_INT_FLAG_CLR_B	(BIT(0) | BIT(1))
+#define VFF_TX_INT_FLAG_CLR_B	0
+#define VFF_STOP_CLR_B		0
+#define VFF_FLUSH_CLR_B		0
+#define VFF_INT_EN_CLR_B	0
+#define VFF_4G_SUPPORT_CLR_B	0
+
+/* interrupt trigger level for tx */
+#define VFF_TX_THRE(n)		((n) * 7 / 8)
+/* interrupt trigger level for rx */
+#define VFF_RX_THRE(n)		((n) * 3 / 4)
+
+#define MTK_UART_APDMA_RING_SIZE	0xffffU
+/* invert this bit when the ring head wraps around again */
+#define MTK_UART_APDMA_RING_WRAP	0x10000U
+
+#define VFF_INT_FLAG		0x00
+#define VFF_INT_EN		0x04
+#define VFF_EN			0x08
+#define VFF_RST			0x0c
+#define VFF_STOP		0x10
+#define VFF_FLUSH		0x14
+#define VFF_ADDR		0x1c
+#define VFF_LEN			0x24
+#define VFF_THRE		0x28
+#define VFF_WPT			0x2c
+#define VFF_RPT			0x30
+/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
+#define VFF_VALID_SIZE		0x3c
+/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
+#define VFF_LEFT_SIZE		0x40
+#define VFF_DEBUG_STATUS	0x50
+#define VFF_4G_SUPPORT		0x54
+
+struct mtk_uart_apdmadev {
+	struct dma_device ddev;
+	struct clk *clk;
+	bool support_33bits;
+	unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
+};
+
+struct mtk_uart_apdma_desc {
+	struct virt_dma_desc vd;
+
+	unsigned int avail_len;
+};
+
+struct mtk_chan {
+	struct virt_dma_chan vc;
+	struct dma_slave_config	cfg;
+	void __iomem *base;
+	struct mtk_uart_apdma_desc *desc;
+
+	bool requested;
+
+	unsigned int rx_status;
+};
+
+static inline struct mtk_uart_apdmadev *
+to_mtk_uart_apdma_dev(struct dma_device *d)
+{
+	return container_of(d, struct mtk_uart_apdmadev, ddev);
+}
+
+static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct mtk_chan, vc.chan);
+}
+
+static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
+	(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
+}
+
+static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
+			       unsigned int reg, unsigned int val)
+{
+	writel(val, c->base + reg);
+}
+
+static unsigned int
+mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
+{
+	return readl(c->base + reg);
+}
+
+static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct dma_chan *chan = vd->tx.chan;
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+
+	kfree(c->desc);
+	c->desc = NULL;
+}
+
+static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
+{
+	unsigned int txcount = c->desc->avail_len;
+	unsigned int len, send, left, wpt, wrap;
+
+	if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
+		mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
+	} else {
+		len = mtk_uart_apdma_chan_read(c, VFF_LEN);
+
+		while (((left = mtk_uart_apdma_chan_read(c,
+					VFF_LEFT_SIZE)) > 0U)
+				&& (c->desc->avail_len != 0U)) {
+			send = min_t(unsigned int, left, c->desc->avail_len);
+			wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
+			wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
+					0U : MTK_UART_APDMA_RING_WRAP;
+
+			if ((wpt & (len - 1U)) + send < len)
+				mtk_uart_apdma_chan_write(c,
+						VFF_WPT, wpt + send);
+			else
+				mtk_uart_apdma_chan_write(c, VFF_WPT,
+						   ((wpt + send) & (len - 1U))
+						   | wrap);
+
+			c->desc->avail_len -= send;
+		}
+
+		if (txcount != c->desc->avail_len) {
+			mtk_uart_apdma_chan_write(c,
+					VFF_INT_EN, VFF_TX_INT_EN_B);
+			if (mtk_uart_apdma_chan_read(c,
+						VFF_FLUSH) == 0U)
+				mtk_uart_apdma_chan_write(c,
+						VFF_FLUSH, VFF_FLUSH_B);
+		}
+	}
+}
+
+static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
+{
+	struct mtk_uart_apdma_desc *d = c->desc;
+	unsigned int rx_len, wg, rg, count;
+
+	if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
+		return;
+
+	if (d && vchan_next_desc(&c->vc)) {
+		rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
+		rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
+		wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
+		count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
+				((wg & MTK_UART_APDMA_RING_SIZE) +
+				rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
+				((wg & MTK_UART_APDMA_RING_SIZE) -
+				(rg & MTK_UART_APDMA_RING_SIZE));
+
+		c->rx_status = count;
+		mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
+
+		list_del(&d->vd.node);
+		vchan_cookie_complete(&d->vd);
+	}
+}
+
+static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
+{
+	struct dma_chan *chan = (struct dma_chan *)dev_id;
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct mtk_uart_apdma_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	switch (c->cfg.direction) {
+	case DMA_DEV_TO_MEM:
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
+		mtk_uart_apdma_start_rx(c);
+		break;
+	case DMA_MEM_TO_DEV:
+		d = c->desc;
+
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
+
+		if (d->avail_len != 0U) {
+			mtk_uart_apdma_start_tx(c);
+		} else {
+			list_del(&d->vd.node);
+			vchan_cookie_complete(&d->vd);
+		}
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	u32 status;
+	int ret = -EBUSY;
+
+	pm_runtime_get_sync(mtkd->ddev.dev);
+
+	mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
+	mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
+	mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
+	mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
+
+	ret = readx_poll_timeout(readl,
+				 c->base + VFF_EN,
+				 status, status == 0, 10, 100);
+	if (ret) {
+		dev_err(c->vc.chan.device->dev,
+				"dma reset: fail, timeout\n");
+		goto exit;
+	}
+
+	if (!c->requested) {
+		c->requested = true;
+		ret = request_irq(mtkd->dma_irq[chan->chan_id],
+				  mtk_uart_apdma_irq_handler,
+				  IRQF_TRIGGER_NONE,
+				  KBUILD_MODNAME, chan);
+		if (ret < 0) {
+			dev_err(chan->device->dev, "Can't request dma IRQ\n");
+			return -EINVAL;
+		}
+	}
+
+	if (mtkd->support_33bits)
+		mtk_uart_apdma_chan_write(c,
+				VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+
+exit:
+	return ret;
+}
+
+static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+
+	if (c->requested) {
+		c->requested = false;
+		free_irq(mtkd->dma_irq[chan->chan_id], chan);
+	}
+
+	tasklet_kill(&c->vc.task);
+
+	vchan_free_chan_resources(&c->vc);
+
+	pm_runtime_put_sync(mtkd->ddev.dev);
+}
+
+static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
+					 dma_cookie_t cookie,
+					 struct dma_tx_state *txstate)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	enum dma_status ret;
+	unsigned long flags;
+
+	if (!txstate)
+		return DMA_ERROR;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (ret == DMA_IN_PROGRESS) {
+		c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
+			     & MTK_UART_APDMA_RING_SIZE;
+		dma_set_residue(txstate, c->rx_status);
+	} else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
+		dma_set_residue(txstate, c->rx_status);
+	} else {
+		dma_set_residue(txstate, 0);
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return ret;
+}
+
+/*
+ * dmaengine_prep_slave_single will call this function, with sglen set to 1.
+ * The 8250 UART uses one ring buffer and deals with one sg entry.
+ */
+static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
+	(struct dma_chan *chan, struct scatterlist *sgl,
+	unsigned int sglen,	enum dma_transfer_direction dir,
+	unsigned long tx_flags, void *context)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct mtk_uart_apdma_desc *d;
+
+	if ((dir != DMA_DEV_TO_MEM) &&
+		(dir != DMA_MEM_TO_DEV)) {
+		dev_err(chan->device->dev, "bad direction\n");
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor */
+	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	/* sglen is 1 */
+	d->avail_len = sg_dma_len(sgl);
+
+	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->cfg.direction == DMA_DEV_TO_MEM) {
+		if (vchan_issue_pending(&c->vc) && !c->desc) {
+			vd = vchan_next_desc(&c->vc);
+			c->desc = to_mtk_uart_apdma_desc(&vd->tx);
+			mtk_uart_apdma_start_rx(c);
+		}
+	} else if (c->cfg.direction == DMA_MEM_TO_DEV) {
+		if (vchan_issue_pending(&c->vc) && !c->desc) {
+			vd = vchan_next_desc(&c->vc);
+			c->desc = to_mtk_uart_apdma_desc(&vd->tx);
+			mtk_uart_apdma_start_tx(c);
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
+				struct dma_slave_config *cfg)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	struct mtk_uart_apdmadev *mtkd =
+				to_mtk_uart_apdma_dev(c->vc.chan.device);
+
+	c->cfg = *cfg;
+
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		unsigned int rx_len = cfg->src_addr_width * 1024;
+
+		mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
+		mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
+		mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_EN, VFF_RX_INT_EN0_B
+				| VFF_RX_INT_EN1_B);
+		mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
+		mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
+	} else if (cfg->direction == DMA_MEM_TO_DEV)	{
+		unsigned int tx_len = cfg->dst_addr_width * 1024;
+
+		mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
+		mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
+		mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
+		mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
+		mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
+	}
+
+	if (mtkd->support_33bits)
+		mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
+
+	if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
+		dev_err(chan->device->dev,
+			"config dma dir[%d] fail\n", cfg->direction);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
+{
+	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
+	unsigned long flags;
+	u32 status;
+	int ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
+	/* Wait for flush */
+	ret = readx_poll_timeout(readl,
+				 c->base + VFF_FLUSH,
+				 status,
+				 (status & VFF_FLUSH_B) != VFF_FLUSH_B,
+				 10, 100);
+	if (ret)
+		dev_err(c->vc.chan.device->dev,
+			"dma stop: polling FLUSH fail, DEBUG=0x%x\n",
+			mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
+
+	/* set stop as 1 -> wait until en is 0 -> set stop as 0 */
+	mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
+	ret = readx_poll_timeout(readl,
+				 c->base + VFF_EN,
+				 status, status == 0, 10, 100);
+	if (ret)
+		dev_err(c->vc.chan.device->dev,
+			"dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
+			mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
+
+	mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
+	mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
+
+	switch (c->cfg.direction) {
+	case DMA_DEV_TO_MEM:
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
+		break;
+	case DMA_MEM_TO_DEV:
+		mtk_uart_apdma_chan_write(c,
+				VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
+{
+	/* just to pass the caps check */
+	return 0;
+}
+
+static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
+{
+	/* just to pass the caps check */
+	return 0;
+}
+
+static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
+{
+	while (list_empty(&mtkd->ddev.channels) == 0) {
+		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
+			struct mtk_chan, vc.chan.device_node);
+
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+}
+
+static const struct of_device_id mtk_uart_apdma_match[] = {
+	{ .compatible = "mediatek,mt6577-uart-dma", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
+
+static int mtk_uart_apdma_probe(struct platform_device *pdev)
+{
+	struct mtk_uart_apdmadev *mtkd;
+	struct resource *res;
+	struct mtk_chan *c;
+	unsigned int i;
+	int rc;
+
+	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
+	if (!mtkd)
+		return -ENOMEM;
+
+	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mtkd->clk)) {
+		dev_err(&pdev->dev, "No clock specified\n");
+		rc = PTR_ERR(mtkd->clk);
+		goto err_no_dma;
+	}
+
+	if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
+		dev_info(&pdev->dev, "Support dma 33bits\n");
+		mtkd->support_33bits = true;
+	}
+
+	rc = dma_set_mask_and_coherent(&pdev->dev,
+				DMA_BIT_MASK(32 | mtkd->support_33bits));
+	if (rc)
+		goto err_no_dma;
+
+	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
+	mtkd->ddev.device_alloc_chan_resources =
+				mtk_uart_apdma_alloc_chan_resources;
+	mtkd->ddev.device_free_chan_resources =
+				mtk_uart_apdma_free_chan_resources;
+	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
+	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
+	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
+	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
+	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
+	mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
+	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
+	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
+	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	mtkd->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&mtkd->ddev.channels);
+
+	for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
+		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
+		if (!c) {
+			rc = -ENODEV;
+			goto err_no_dma;
+		}
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res) {
+			rc = -ENODEV;
+			goto err_no_dma;
+		}
+
+		c->base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(c->base)) {
+			rc = PTR_ERR(c->base);
+			goto err_no_dma;
+		}
+		c->requested = false;
+		c->vc.desc_free = mtk_uart_apdma_desc_free;
+		vchan_init(&c->vc, &mtkd->ddev);
+
+		mtkd->dma_irq[i] = platform_get_irq(pdev, i);
+		if ((int)mtkd->dma_irq[i] < 0) {
+			dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
+			rc = -EINVAL;
+			goto err_no_dma;
+		}
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+
+	rc = dma_async_device_register(&mtkd->ddev);
+	if (rc)
+		goto rpm_disable;
+
+	platform_set_drvdata(pdev, mtkd);
+
+	if (pdev->dev.of_node) {
+		/* Device-tree DMA controller registration */
+		rc = of_dma_controller_register(pdev->dev.of_node,
+						of_dma_xlate_by_chan_id,
+						mtkd);
+		if (rc)
+			goto dma_remove;
+	}
+
+	return rc;
+
+dma_remove:
+	dma_async_device_unregister(&mtkd->ddev);
+rpm_disable:
+	pm_runtime_disable(&pdev->dev);
+err_no_dma:
+	mtk_uart_apdma_free(mtkd);
+	return rc;
+}
+
+static int mtk_uart_apdma_remove(struct platform_device *pdev)
+{
+	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
+
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+
+	dma_async_device_unregister(&mtkd->ddev);
+	mtk_uart_apdma_free(mtkd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_uart_apdma_suspend(struct device *dev)
+{
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	if (!pm_runtime_suspended(dev))
+		clk_disable_unprepare(mtkd->clk);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_resume(struct device *dev)
+{
+	int ret;
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	if (!pm_runtime_suspended(dev)) {
+		ret = clk_prepare_enable(mtkd->clk);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_uart_apdma_runtime_suspend(struct device *dev)
+{
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(mtkd->clk);
+
+	return 0;
+}
+
+static int mtk_uart_apdma_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
+
+	ret = clk_prepare_enable(mtkd->clk);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
+	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
+			   mtk_uart_apdma_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_uart_apdma_driver = {
+	.probe	= mtk_uart_apdma_probe,
+	.remove	= mtk_uart_apdma_remove,
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.pm		= &mtk_uart_apdma_pm_ops,
+		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
+	},
+};
+
+module_platform_driver(mtk_uart_apdma_driver);
+
+MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
+MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
index 27bac0b..1a523c87 100644
--- a/drivers/dma/mediatek/Kconfig
+++ b/drivers/dma/mediatek/Kconfig
@@ -1,4 +1,15 @@
 
+config DMA_MTK_UART
+	tristate "MediaTek SoCs APDMA support for UART"
+	depends on OF && SERIAL_8250_MT6577
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the UART DMA engine found on MediaTek SoCs.
+	  When SERIAL_8250_MT6577 is enabled and you want to use DMA,
+	  you can enable this config. The DMA engine can only be used
+	  with MediaTek SoCs.
+
 config MTK_HSDMA
 	tristate "MediaTek High-Speed DMA controller support"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
index 6e778f8..2f2efd9 100644
--- a/drivers/dma/mediatek/Makefile
+++ b/drivers/dma/mediatek/Makefile
@@ -1 +1,2 @@
+obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
 obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
-- 
1.7.9.5



* [PATCH v7 2/2] arm: dts: mt2712: add uart APDMA to device tree
  2018-12-25  1:26 [PATCH v7 0/2] add uart DMA function Long Cheng
  2018-12-25  1:26 ` [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support Long Cheng
@ 2018-12-25  1:26 ` Long Cheng
  1 sibling, 0 replies; 9+ messages in thread
From: Long Cheng @ 2018-12-25  1:26 UTC (permalink / raw)
  To: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang
  Cc: Matthias Brugger, Dan Williams, Greg Kroah-Hartman, Jiri Slaby,
	Sean Wang, dmaengine, devicetree, linux-arm-kernel,
	linux-mediatek, linux-kernel, linux-serial, srv_heupstream,
	Yingjoe Chen, YT Shen, Long Cheng

1. add uart APDMA controller device node
2. add DMA support for uart 0/1/2/3/4/5

Signed-off-by: Long Cheng <long.cheng@mediatek.com>
---
 arch/arm64/boot/dts/mediatek/mt2712e.dtsi |   50 +++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 976d92a..be1a22a 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -300,6 +300,9 @@
 		interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 10
+			&apdma 11>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -369,6 +372,38 @@
 			 (GIC_CPU_MASK_RAW(0x13) | IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
+	apdma: dma-controller@11000400 {
+		compatible = "mediatek,mt2712-uart-dma",
+			     "mediatek,mt6577-uart-dma";
+		reg = <0 0x11000400 0 0x80>,
+		      <0 0x11000480 0 0x80>,
+		      <0 0x11000500 0 0x80>,
+		      <0 0x11000580 0 0x80>,
+		      <0 0x11000600 0 0x80>,
+		      <0 0x11000680 0 0x80>,
+		      <0 0x11000700 0 0x80>,
+		      <0 0x11000780 0 0x80>,
+		      <0 0x11000800 0 0x80>,
+		      <0 0x11000880 0 0x80>,
+		      <0 0x11000900 0 0x80>,
+		      <0 0x11000980 0 0x80>;
+		interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "apdma";
+		#dma-cells = <1>;
+	};
+
 	auxadc: adc@11001000 {
 		compatible = "mediatek,mt2712-auxadc";
 		reg = <0 0x11001000 0 0x1000>;
@@ -385,6 +420,9 @@
 		interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 0
+			&apdma 1>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -395,6 +433,9 @@
 		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 2
+			&apdma 3>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -405,6 +446,9 @@
 		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 4
+			&apdma 5>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -415,6 +459,9 @@
 		interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 6
+			&apdma 7>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -629,6 +676,9 @@
 		interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_LOW>;
 		clocks = <&baud_clk>, <&sys_clk>;
 		clock-names = "baud", "bus";
+		dmas = <&apdma 8
+			&apdma 9>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
-- 
1.7.9.5



* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-25  1:26 ` [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support Long Cheng
@ 2018-12-25  7:16   ` Nicolas Boichat
  2018-12-25 12:06     ` Long Cheng
  0 siblings, 1 reply; 9+ messages in thread
From: Nicolas Boichat @ 2018-12-25  7:16 UTC (permalink / raw)
  To: Long Cheng
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen

Not a full review, a few comments below.

Thanks,

On Tue, Dec 25, 2018 at 9:27 AM Long Cheng <long.cheng@mediatek.com> wrote:
>
> In DMA engine framework, add 8250 uart dma to support MediaTek uart.
> If MediaTek uart enabled(SERIAL_8250_MT6577), and want to improve
> the performance, can enable the function.
>
> Signed-off-by: Long Cheng <long.cheng@mediatek.com>
> ---
>  drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
>  drivers/dma/mediatek/Kconfig        |   11 +
>  drivers/dma/mediatek/Makefile       |    1 +
>  3 files changed, 706 insertions(+)
>  create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c
>
> diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
> new file mode 100644
> index 0000000..c4090f2
> --- /dev/null
> +++ b/drivers/dma/mediatek/8250_mtk_dma.c
> @@ -0,0 +1,694 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * MediaTek 8250 DMA driver.
> + *
> + * Copyright (c) 2018 MediaTek Inc.
> + * Author: Long Cheng <long.cheng@mediatek.com>
> + */
> +
> +#include <linux/clk.h>
> +#include <linux/dmaengine.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/list.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/of_dma.h>
> +#include <linux/of_device.h>
> +#include <linux/platform_device.h>
> +#include <linux/slab.h>
> +#include <linux/spinlock.h>
> +#include <linux/pm_runtime.h>
> +#include <linux/iopoll.h>

Alphabetical order.

> +
> +#include "../virt-dma.h"
> +
> +#define MTK_UART_APDMA_CHANNELS                (CONFIG_SERIAL_8250_NR_UARTS * 2)
> +
> +#define VFF_EN_B               BIT(0)
> +#define VFF_STOP_B             BIT(0)
> +#define VFF_FLUSH_B            BIT(0)
> +#define VFF_4G_SUPPORT_B       BIT(0)
> +#define VFF_RX_INT_EN0_B       BIT(0)  /*rx valid size >=  vff thre*/
> +#define VFF_RX_INT_EN1_B       BIT(1)
> +#define VFF_TX_INT_EN_B                BIT(0)  /*tx left size >= vff thre*/
> +#define VFF_WARM_RST_B         BIT(0)
> +#define VFF_RX_INT_FLAG_CLR_B  (BIT(0) | BIT(1))
> +#define VFF_TX_INT_FLAG_CLR_B  0
> +#define VFF_STOP_CLR_B         0
> +#define VFF_FLUSH_CLR_B                0
> +#define VFF_INT_EN_CLR_B       0
> +#define VFF_4G_SUPPORT_CLR_B   0
> +
> +/* interrupt trigger level for tx */
> +#define VFF_TX_THRE(n)         ((n) * 7 / 8)
> +/* interrupt trigger level for rx */
> +#define VFF_RX_THRE(n)         ((n) * 3 / 4)
> +
> +#define MTK_UART_APDMA_RING_SIZE       0xffffU
> +/* invert this bit when wrap ring head again*/
> +#define MTK_UART_APDMA_RING_WRAP       0x10000U
> +
> +#define VFF_INT_FLAG           0x00
> +#define VFF_INT_EN             0x04
> +#define VFF_EN                 0x08
> +#define VFF_RST                        0x0c
> +#define VFF_STOP               0x10
> +#define VFF_FLUSH              0x14
> +#define VFF_ADDR               0x1c
> +#define VFF_LEN                        0x24
> +#define VFF_THRE               0x28
> +#define VFF_WPT                        0x2c
> +#define VFF_RPT                        0x30
> +/*TX: the buffer size HW can read. RX: the buffer size SW can read.*/
> +#define VFF_VALID_SIZE         0x3c
> +/*TX: the buffer size SW can write. RX: the buffer size HW can write.*/
> +#define VFF_LEFT_SIZE          0x40
> +#define VFF_DEBUG_STATUS       0x50
> +#define VFF_4G_SUPPORT         0x54
> +
> +struct mtk_uart_apdmadev {
> +       struct dma_device ddev;
> +       struct clk *clk;
> +       bool support_33bits;
> +       unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
> +};
> +
> +struct mtk_uart_apdma_desc {
> +       struct virt_dma_desc vd;
> +
> +       unsigned int avail_len;
> +};
> +
> +struct mtk_chan {
> +       struct virt_dma_chan vc;
> +       struct dma_slave_config cfg;
> +       void __iomem *base;
> +       struct mtk_uart_apdma_desc *desc;
> +
> +       bool requested;
> +
> +       unsigned int rx_status;
> +};
> +
> +static inline struct mtk_uart_apdmadev *
> +to_mtk_uart_apdma_dev(struct dma_device *d)
> +{
> +       return container_of(d, struct mtk_uart_apdmadev, ddev);
> +}
> +
> +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
> +{
> +       return container_of(c, struct mtk_chan, vc.chan);
> +}
> +
> +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
> +       (struct dma_async_tx_descriptor *t)
> +{
> +       return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
> +}
> +
> +static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
> +                              unsigned int reg, unsigned int val)
> +{
> +       writel(val, c->base + reg);
> +}
> +
> +static unsigned int
> +mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
> +{
> +       return readl(c->base + reg);
> +}
> +
> +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
> +{
> +       struct dma_chan *chan = vd->tx.chan;
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +
> +       kfree(c->desc);
> +       c->desc = NULL;

Why? I'm afraid this may mask double-free.

> +}
> +
> +static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
> +{
> +       unsigned int txcount = c->desc->avail_len;

Variable name is confusing... I'd rather have a boolean `transmitted`
(or something), set to 0 here, and set to 1 in the loop (but again,
maybe this is not necessary at all, see below).

> +       unsigned int len, send, left, wpt, wrap;
> +
> +       if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
> +               mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);

I'd add a "return;" here and de-indent the rest of the function (that
should help to make it more readable).
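
Roughly (untested, just to show the shape):

	if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
		mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	len = mtk_uart_apdma_chan_read(c, VFF_LEN);
	/* ... rest of the current else branch, pulled out one indent level ... */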

> +       } else {
> +               len = mtk_uart_apdma_chan_read(c, VFF_LEN);

You make an assumption below that len is always a power of 2. I wonder
if we want to validate that with some BUG/WARN call.

Also, I assume VFF_LEN is constant? Maybe we can read it once only in
the probe function?
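
Something like this, maybe (untested sketch; is_power_of_2() comes from linux/log2.h):

	len = mtk_uart_apdma_chan_read(c, VFF_LEN);
	if (WARN_ON_ONCE(!is_power_of_2(len)))
		return;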

> +
> +               while (((left = mtk_uart_apdma_chan_read(c,
> +                                       VFF_LEFT_SIZE)) > 0U)
> +                               && (c->desc->avail_len != 0U)) {

Why do we need a while loop here? LEFT_SIZE will always be smaller
than LEN, right? Shouldn't we load as much as we can, then let an
interrupt happen and reload the FIFO at that stage? Or does this help
gain a bit of extra performance?

> +                       send = min_t(unsigned int, left, c->desc->avail_len);
> +                       wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
> +                       wrap = wpt & MTK_UART_APDMA_RING_WRAP ?

wrap = wpt ^ MTK_UART_APDMA_RING_WRAP ?

> +                                       0U : MTK_UART_APDMA_RING_WRAP;
> +
> +                       if ((wpt & (len - 1U)) + send < len)
> +                               mtk_uart_apdma_chan_write(c,
> +                                               VFF_WPT, wpt + send);
> +                       else
> +                               mtk_uart_apdma_chan_write(c, VFF_WPT,
> +                                                  ((wpt + send) & (len - 1U))
> +                                                  | wrap);
> +
> +                       c->desc->avail_len -= send;
> +               }
> +
> +               if (txcount != c->desc->avail_len) {

We know mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) > 0 at least once,
so this test should always be true.

Unless we have c->desc->avail_len == 0, but then we won't even call
this function... So this is always true.

> +                       mtk_uart_apdma_chan_write(c,
> +                                       VFF_INT_EN, VFF_TX_INT_EN_B);
> +                       if (mtk_uart_apdma_chan_read(c,
> +                                               VFF_FLUSH) == 0U)
> +                               mtk_uart_apdma_chan_write(c,
> +                                               VFF_FLUSH, VFF_FLUSH_B);
> +               }
> +       }
> +}
> +
> +static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
> +{
> +       struct mtk_uart_apdma_desc *d = c->desc;
> +       unsigned int rx_len, wg, rg, count;
> +
> +       if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
> +               return;
> +
> +       if (d && vchan_next_desc(&c->vc)) {
> +               rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> +               rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
> +               wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
> +               count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
> +                               ((wg & MTK_UART_APDMA_RING_SIZE) +
> +                               rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
> +                               ((wg & MTK_UART_APDMA_RING_SIZE) -
> +                               (rg & MTK_UART_APDMA_RING_SIZE));

This is hard to read, please use a if/else.
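
e.g. (untested, same logic as the ternary above):

		if ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP)
			count = (wg & MTK_UART_APDMA_RING_SIZE) + rx_len -
				(rg & MTK_UART_APDMA_RING_SIZE);
		else
			count = (wg & MTK_UART_APDMA_RING_SIZE) -
				(rg & MTK_UART_APDMA_RING_SIZE);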

> +
> +               c->rx_status = count;
> +               mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
> +
> +               list_del(&d->vd.node);
> +               vchan_cookie_complete(&d->vd);
> +       }
> +}
> +
> +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
> +{
> +       struct dma_chan *chan = (struct dma_chan *)dev_id;
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       struct mtk_uart_apdma_desc *d;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&c->vc.lock, flags);
> +       switch (c->cfg.direction) {
> +       case DMA_DEV_TO_MEM:

You use an if/else below in a similar case, please be consistent (IMHO
if/else is a bit more compact, so I'd use that here).
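
i.e. something like (untested, just the switch rewritten as if/else):

	if (c->cfg.direction == DMA_DEV_TO_MEM) {
		mtk_uart_apdma_chan_write(c, VFF_INT_FLAG,
					  VFF_RX_INT_FLAG_CLR_B);
		mtk_uart_apdma_start_rx(c);
	} else if (c->cfg.direction == DMA_MEM_TO_DEV) {
		d = c->desc;

		mtk_uart_apdma_chan_write(c, VFF_INT_FLAG,
					  VFF_TX_INT_FLAG_CLR_B);

		if (d->avail_len != 0U) {
			mtk_uart_apdma_start_tx(c);
		} else {
			list_del(&d->vd.node);
			vchan_cookie_complete(&d->vd);
		}
	}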

> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> +               mtk_uart_apdma_start_rx(c);
> +               break;
> +       case DMA_MEM_TO_DEV:
> +               d = c->desc;
> +
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> +
> +               if (d->avail_len != 0U) {
> +                       mtk_uart_apdma_start_tx(c);
> +               } else {
> +                       list_del(&d->vd.node);
> +                       vchan_cookie_complete(&d->vd);
> +               }
> +               break;
> +       default:
> +               break;
> +       }
> +       spin_unlock_irqrestore(&c->vc.lock, flags);
> +
> +       return IRQ_HANDLED;
> +}
> +
> +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       u32 status;
> +       int ret = -EBUSY;
> +
> +       pm_runtime_get_sync(mtkd->ddev.dev);
> +
> +       mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
> +       mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
> +       mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
> +       mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
> +
> +       ret = readx_poll_timeout(readl,
> +                                c->base + VFF_EN,
> +                                status, status == 0, 10, 100);
> +       if (ret) {
> +               dev_err(c->vc.chan.device->dev,
> +                               "dma reset: fail, timeout\n");
> +               goto exit;
> +       }
> +
> +       if (!c->requested) {
> +               c->requested = true;
> +               ret = request_irq(mtkd->dma_irq[chan->chan_id],
> +                                 mtk_uart_apdma_irq_handler,
> +                                 IRQF_TRIGGER_NONE,
> +                                 KBUILD_MODNAME, chan);
> +               if (ret < 0) {
> +                       dev_err(chan->device->dev, "Can't request dma IRQ\n");
> +                       return -EINVAL;
> +               }
> +       }
> +
> +       if (mtkd->support_33bits)
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
> +
> +exit:
> +       return ret;
> +}
> +
> +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
> +{
> +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +
> +       if (c->requested) {
> +               c->requested = false;
> +               free_irq(mtkd->dma_irq[chan->chan_id], chan);
> +       }
> +
> +       tasklet_kill(&c->vc.task);
> +
> +       vchan_free_chan_resources(&c->vc);
> +
> +       pm_runtime_put_sync(mtkd->ddev.dev);
> +}
> +
> +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
> +                                        dma_cookie_t cookie,
> +                                        struct dma_tx_state *txstate)
> +{
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       enum dma_status ret;
> +       unsigned long flags;
> +
> +       if (!txstate)
> +               return DMA_ERROR;
> +
> +       ret = dma_cookie_status(chan, cookie, txstate);
> +       spin_lock_irqsave(&c->vc.lock, flags);
> +       if (ret == DMA_IN_PROGRESS) {
> +               c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
> +                            & MTK_UART_APDMA_RING_SIZE;
> +               dma_set_residue(txstate, c->rx_status);
> +       } else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
> +               dma_set_residue(txstate, c->rx_status);
> +       } else {
> +               dma_set_residue(txstate, 0);
> +       }
> +       spin_unlock_irqrestore(&c->vc.lock, flags);
> +
> +       return ret;
> +}
> +
> +/*
> + * dmaengine_prep_slave_single will call the function. and sglen is 1.
> + * 8250 uart using one ring buffer, and deal with one sg.
> + */
> +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
> +       (struct dma_chan *chan, struct scatterlist *sgl,
> +       unsigned int sglen,     enum dma_transfer_direction dir,
> +       unsigned long tx_flags, void *context)
> +{
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       struct mtk_uart_apdma_desc *d;
> +
> +       if ((dir != DMA_DEV_TO_MEM) &&
> +               (dir != DMA_MEM_TO_DEV)) {
> +               dev_err(chan->device->dev, "bad direction\n");
> +               return NULL;
> +       }
> +
> +       /* Now allocate and setup the descriptor */
> +       d = kzalloc(sizeof(*d), GFP_ATOMIC);
> +       if (!d)
> +               return NULL;
> +
> +       /* sglen is 1 */
> +       d->avail_len = sg_dma_len(sgl);
> +
> +       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
> +}
> +
> +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
> +{
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       struct virt_dma_desc *vd;
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&c->vc.lock, flags);
> +       if (c->cfg.direction == DMA_DEV_TO_MEM) {
> +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> +                       vd = vchan_next_desc(&c->vc);
> +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> +                       mtk_uart_apdma_start_rx(c);
> +               }
> +       } else if (c->cfg.direction == DMA_MEM_TO_DEV) {
> +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> +                       vd = vchan_next_desc(&c->vc);
> +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> +                       mtk_uart_apdma_start_tx(c);
> +               }
> +       }
> +       spin_unlock_irqrestore(&c->vc.lock, flags);
> +}
> +
> +static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
> +                               struct dma_slave_config *cfg)
> +{
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       struct mtk_uart_apdmadev *mtkd =
> +                               to_mtk_uart_apdma_dev(c->vc.chan.device);
> +
> +       c->cfg = *cfg;
> +
> +       if (cfg->direction == DMA_DEV_TO_MEM) {
> +               unsigned int rx_len = cfg->src_addr_width * 1024;
> +
> +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
> +               mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
> +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_EN, VFF_RX_INT_EN0_B
> +                               | VFF_RX_INT_EN1_B);
> +               mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> +       } else if (cfg->direction == DMA_MEM_TO_DEV)    {
> +               unsigned int tx_len = cfg->dst_addr_width * 1024;
> +
> +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
> +               mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
> +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
> +               mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> +       }
> +
> +       if (mtkd->support_33bits)
> +               mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
> +
> +       if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
> +               dev_err(chan->device->dev,
> +                       "config dma dir[%d] fail\n", cfg->direction);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
> +{
> +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> +       unsigned long flags;
> +       u32 status;
> +       int ret;
> +
> +       spin_lock_irqsave(&c->vc.lock, flags);
> +
> +       mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
> +       /* Wait for flush */
> +       ret = readx_poll_timeout(readl,
> +                                c->base + VFF_FLUSH,
> +                                status,
> +                                (status & VFF_FLUSH_B) != VFF_FLUSH_B,
> +                                10, 100);
> +       if (ret)
> +               dev_err(c->vc.chan.device->dev,
> +                       "dma stop: polling FLUSH fail, DEBUG=0x%x\n",
> +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> +
> +       /*set stop as 1 -> wait until en is 0 -> set stop as 0*/
> +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
> +       ret = readx_poll_timeout(readl,
> +                                c->base + VFF_EN,
> +                                status, status == 0, 10, 100);
> +       if (ret)
> +               dev_err(c->vc.chan.device->dev,
> +                       "dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
> +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> +
> +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
> +       mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
> +
> +       switch (c->cfg.direction) {
> +       case DMA_DEV_TO_MEM:

Ditto, if/else?

> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> +               break;
> +       case DMA_MEM_TO_DEV:
> +               mtk_uart_apdma_chan_write(c,
> +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> +               break;
> +       default:
> +               break;
> +       }
> +
> +       spin_unlock_irqrestore(&c->vc.lock, flags);
> +
> +       return 0;
> +}
> +
> +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> +{
> +       /* just for check caps pass */
> +       return 0;
> +}
> +
> +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> +{
> +       /* just for check caps pass */
> +       return 0;
> +}
> +
> +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> +{
> +       while (list_empty(&mtkd->ddev.channels) == 0) {
> +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> +                       struct mtk_chan, vc.chan.device_node);
> +
> +               list_del(&c->vc.chan.device_node);
> +               tasklet_kill(&c->vc.task);
> +       }
> +}
> +
> +static const struct of_device_id mtk_uart_apdma_match[] = {
> +       { .compatible = "mediatek,mt6577-uart-dma", },
> +       { /* sentinel */ },
> +};
> +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> +
> +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> +{
> +       struct mtk_uart_apdmadev *mtkd;
> +       struct resource *res;
> +       struct mtk_chan *c;
> +       unsigned int i;
> +       int rc;
> +
> +       mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
> +       if (!mtkd)
> +               return -ENOMEM;
> +
> +       mtkd->clk = devm_clk_get(&pdev->dev, NULL);
> +       if (IS_ERR(mtkd->clk)) {
> +               dev_err(&pdev->dev, "No clock specified\n");
> +               rc = PTR_ERR(mtkd->clk);
> +               goto err_no_dma;
> +       }
> +
> +       if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
> +               dev_info(&pdev->dev, "Support dma 33bits\n");

Drop this, a bit verbose.

> +               mtkd->support_33bits = true;
> +       }
> +
> +       rc = dma_set_mask_and_coherent(&pdev->dev,
> +                               DMA_BIT_MASK(32 | mtkd->support_33bits));
> +       if (rc)
> +               goto err_no_dma;
> +
> +       dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
> +       mtkd->ddev.device_alloc_chan_resources =
> +                               mtk_uart_apdma_alloc_chan_resources;
> +       mtkd->ddev.device_free_chan_resources =
> +                               mtk_uart_apdma_free_chan_resources;
> +       mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
> +       mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
> +       mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
> +       mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
> +       mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
> +       mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
> +       mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
> +       mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> +       mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> +       mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> +       mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> +       mtkd->ddev.dev = &pdev->dev;
> +       INIT_LIST_HEAD(&mtkd->ddev.channels);
> +
> +       for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
> +               c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
> +               if (!c) {
> +                       rc = -ENODEV;
> +                       goto err_no_dma;
> +               }
> +
> +               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
> +               if (!res) {
> +                       rc = -ENODEV;
> +                       goto err_no_dma;
> +               }
> +
> +               c->base = devm_ioremap_resource(&pdev->dev, res);
> +               if (IS_ERR(c->base)) {
> +                       rc = PTR_ERR(c->base);
> +                       goto err_no_dma;
> +               }
> +               c->requested = false;
> +               c->vc.desc_free = mtk_uart_apdma_desc_free;
> +               vchan_init(&c->vc, &mtkd->ddev);
> +
> +               mtkd->dma_irq[i] = platform_get_irq(pdev, i);
> +               if ((int)mtkd->dma_irq[i] < 0) {
> +                       dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
> +                       rc = -EINVAL;
> +                       goto err_no_dma;
> +               }
> +       }
> +
> +       pm_runtime_enable(&pdev->dev);
> +       pm_runtime_set_active(&pdev->dev);
> +
> +       rc = dma_async_device_register(&mtkd->ddev);
> +       if (rc)
> +               goto rpm_disable;
> +
> +       platform_set_drvdata(pdev, mtkd);
> +
> +       if (pdev->dev.of_node) {
> +               /* Device-tree DMA controller registration */
> +               rc = of_dma_controller_register(pdev->dev.of_node,
> +                                               of_dma_xlate_by_chan_id,
> +                                               mtkd);
> +               if (rc)
> +                       goto dma_remove;
> +       }
> +
> +       return rc;
> +
> +dma_remove:
> +       dma_async_device_unregister(&mtkd->ddev);
> +rpm_disable:
> +       pm_runtime_disable(&pdev->dev);
> +err_no_dma:
> +       mtk_uart_apdma_free(mtkd);
> +       return rc;
> +}
> +
> +static int mtk_uart_apdma_remove(struct platform_device *pdev)
> +{
> +       struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
> +
> +       if (pdev->dev.of_node)
> +               of_dma_controller_free(pdev->dev.of_node);
> +
> +       pm_runtime_disable(&pdev->dev);
> +       pm_runtime_put_noidle(&pdev->dev);
> +
> +       dma_async_device_unregister(&mtkd->ddev);
> +       mtk_uart_apdma_free(mtkd);
> +
> +       return 0;
> +}
> +
> +#ifdef CONFIG_PM_SLEEP
> +static int mtk_uart_apdma_suspend(struct device *dev)
> +{
> +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> +
> +       if (!pm_runtime_suspended(dev))
> +               clk_disable_unprepare(mtkd->clk);
> +
> +       return 0;
> +}
> +
> +static int mtk_uart_apdma_resume(struct device *dev)
> +{
> +       int ret;
> +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> +
> +       if (!pm_runtime_suspended(dev)) {
> +               ret = clk_prepare_enable(mtkd->clk);
> +               if (ret)
> +                       return ret;
> +       }
> +
> +       return 0;
> +}
> +#endif /* CONFIG_PM_SLEEP */
> +
> +#ifdef CONFIG_PM
> +static int mtk_uart_apdma_runtime_suspend(struct device *dev)
> +{
> +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> +
> +       clk_disable_unprepare(mtkd->clk);
> +
> +       return 0;
> +}
> +
> +static int mtk_uart_apdma_runtime_resume(struct device *dev)
> +{
> +       int ret;
> +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> +
> +       ret = clk_prepare_enable(mtkd->clk);
> +       if (ret)
> +               return ret;
> +
> +       return 0;
> +}
> +#endif /* CONFIG_PM */
> +
> +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
> +       SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
> +       SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
> +                          mtk_uart_apdma_runtime_resume, NULL)
> +};
> +
> +static struct platform_driver mtk_uart_apdma_driver = {
> +       .probe  = mtk_uart_apdma_probe,
> +       .remove = mtk_uart_apdma_remove,
> +       .driver = {
> +               .name           = KBUILD_MODNAME,
> +               .pm             = &mtk_uart_apdma_pm_ops,
> +               .of_match_table = of_match_ptr(mtk_uart_apdma_match),
> +       },
> +};
> +
> +module_platform_driver(mtk_uart_apdma_driver);
> +
> +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
> +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
> +MODULE_LICENSE("GPL v2");
> +
> diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
> index 27bac0b..1a523c87 100644
> --- a/drivers/dma/mediatek/Kconfig
> +++ b/drivers/dma/mediatek/Kconfig
> @@ -1,4 +1,15 @@
>
> +config DMA_MTK_UART
> +       tristate "MediaTek SoCs APDMA support for UART"
> +       depends on OF && SERIAL_8250_MT6577
> +       select DMA_ENGINE
> +       select DMA_VIRTUAL_CHANNELS
> +       help
> +         Support for the UART DMA engine found on MediaTek MTK SoCs.
> +         when SERIAL_8250_MT6577 is enabled, and if you want to use DMA,
> +         you can enable the config. the DMA engine can only be used
> +         with MediaTek SoCs.
> +
>  config MTK_HSDMA
>         tristate "MediaTek High-Speed DMA controller support"
>         depends on ARCH_MEDIATEK || COMPILE_TEST
> diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
> index 6e778f8..2f2efd9 100644
> --- a/drivers/dma/mediatek/Makefile
> +++ b/drivers/dma/mediatek/Makefile
> @@ -1 +1,2 @@
> +obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
>  obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
> --
> 1.7.9.5
>


* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-25  7:16   ` Nicolas Boichat
@ 2018-12-25 12:06     ` Long Cheng
  2018-12-26  0:05       ` Nicolas Boichat
  0 siblings, 1 reply; 9+ messages in thread
From: Long Cheng @ 2018-12-25 12:06 UTC (permalink / raw)
  To: Nicolas Boichat
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen

Thanks for your comments.

On Tue, 2018-12-25 at 15:16 +0800, Nicolas Boichat wrote:
> Not a full review, a few comments below.
> 
> Thanks,
> 

Would you be able to do a full review of this version of the patch? Then I
can address all of the comments in the next version. Thanks.

> On Tue, Dec 25, 2018 at 9:27 AM Long Cheng <long.cheng@mediatek.com> wrote:
> >
> > In DMA engine framework, add 8250 uart dma to support MediaTek uart.
> > If MediaTek uart enabled(SERIAL_8250_MT6577), and want to improve
> > the performance, can enable the function.
> >
> > Signed-off-by: Long Cheng <long.cheng@mediatek.com>
> > ---
> >  drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
> >  drivers/dma/mediatek/Kconfig        |   11 +
> >  drivers/dma/mediatek/Makefile       |    1 +
> >  3 files changed, 706 insertions(+)
> >  create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c
> >
> > diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
> > new file mode 100644
> > index 0000000..c4090f2
> > --- /dev/null
> > +++ b/drivers/dma/mediatek/8250_mtk_dma.c
> > @@ -0,0 +1,694 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * MediaTek 8250 DMA driver.
> > + *
> > + * Copyright (c) 2018 MediaTek Inc.
> > + * Author: Long Cheng <long.cheng@mediatek.com>
> > + */
> > +
> > +#include <linux/clk.h>
> > +#include <linux/dmaengine.h>
> > +#include <linux/dma-mapping.h>
> > +#include <linux/err.h>
> > +#include <linux/init.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/list.h>
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/of_dma.h>
> > +#include <linux/of_device.h>
> > +#include <linux/platform_device.h>
> > +#include <linux/slab.h>
> > +#include <linux/spinlock.h>
> > +#include <linux/pm_runtime.h>
> > +#include <linux/iopoll.h>
> 
> Alphabetical order.

OK, I will sort them alphabetically.

> 
> > +
> > +#include "../virt-dma.h"
> > +
> > +#define MTK_UART_APDMA_CHANNELS                (CONFIG_SERIAL_8250_NR_UARTS * 2)
> > +
> > +#define VFF_EN_B               BIT(0)
> > +#define VFF_STOP_B             BIT(0)
> > +#define VFF_FLUSH_B            BIT(0)
> > +#define VFF_4G_SUPPORT_B       BIT(0)
> > +#define VFF_RX_INT_EN0_B       BIT(0)  /*rx valid size >=  vff thre*/
> > +#define VFF_RX_INT_EN1_B       BIT(1)
> > +#define VFF_TX_INT_EN_B                BIT(0)  /*tx left size >= vff thre*/
> > +#define VFF_WARM_RST_B         BIT(0)
> > +#define VFF_RX_INT_FLAG_CLR_B  (BIT(0) | BIT(1))
> > +#define VFF_TX_INT_FLAG_CLR_B  0
> > +#define VFF_STOP_CLR_B         0
> > +#define VFF_FLUSH_CLR_B                0
> > +#define VFF_INT_EN_CLR_B       0
> > +#define VFF_4G_SUPPORT_CLR_B   0
> > +
> > +/* interrupt trigger level for tx */
> > +#define VFF_TX_THRE(n)         ((n) * 7 / 8)
> > +/* interrupt trigger level for rx */
> > +#define VFF_RX_THRE(n)         ((n) * 3 / 4)
> > +
> > +#define MTK_UART_APDMA_RING_SIZE       0xffffU
> > +/* invert this bit when wrap ring head again*/
> > +#define MTK_UART_APDMA_RING_WRAP       0x10000U
> > +
> > +#define VFF_INT_FLAG           0x00
> > +#define VFF_INT_EN             0x04
> > +#define VFF_EN                 0x08
> > +#define VFF_RST                        0x0c
> > +#define VFF_STOP               0x10
> > +#define VFF_FLUSH              0x14
> > +#define VFF_ADDR               0x1c
> > +#define VFF_LEN                        0x24
> > +#define VFF_THRE               0x28
> > +#define VFF_WPT                        0x2c
> > +#define VFF_RPT                        0x30
> > +/*TX: the buffer size HW can read. RX: the buffer size SW can read.*/
> > +#define VFF_VALID_SIZE         0x3c
> > +/*TX: the buffer size SW can write. RX: the buffer size HW can write.*/
> > +#define VFF_LEFT_SIZE          0x40
> > +#define VFF_DEBUG_STATUS       0x50
> > +#define VFF_4G_SUPPORT         0x54
> > +
> > +struct mtk_uart_apdmadev {
> > +       struct dma_device ddev;
> > +       struct clk *clk;
> > +       bool support_33bits;
> > +       unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
> > +};
> > +
> > +struct mtk_uart_apdma_desc {
> > +       struct virt_dma_desc vd;
> > +
> > +       unsigned int avail_len;
> > +};
> > +
> > +struct mtk_chan {
> > +       struct virt_dma_chan vc;
> > +       struct dma_slave_config cfg;
> > +       void __iomem *base;
> > +       struct mtk_uart_apdma_desc *desc;
> > +
> > +       bool requested;
> > +
> > +       unsigned int rx_status;
> > +};
> > +
> > +static inline struct mtk_uart_apdmadev *
> > +to_mtk_uart_apdma_dev(struct dma_device *d)
> > +{
> > +       return container_of(d, struct mtk_uart_apdmadev, ddev);
> > +}
> > +
> > +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
> > +{
> > +       return container_of(c, struct mtk_chan, vc.chan);
> > +}
> > +
> > +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
> > +       (struct dma_async_tx_descriptor *t)
> > +{
> > +       return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
> > +}
> > +
> > +static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
> > +                              unsigned int reg, unsigned int val)
> > +{
> > +       writel(val, c->base + reg);
> > +}
> > +
> > +static unsigned int
> > +mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
> > +{
> > +       return readl(c->base + reg);
> > +}
> > +
> > +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
> > +{
> > +       struct dma_chan *chan = vd->tx.chan;
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +
> > +       kfree(c->desc);
> > +       c->desc = NULL;
> 
> Why? I'm afraid this may mask double-free.
> 

OK, I will remove it.

> > +}
> > +
> > +static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
> > +{
> > +       unsigned int txcount = c->desc->avail_len;
> 
> Variable name is confusing... I'd rather have a boolean `transmitted`
> (or something), set to 0 here, and set to 1 in the loop (but again,
> maybe this is not necessary at all, see below).

OK, I will rework the function and remove it.

> 
> > +       unsigned int len, send, left, wpt, wrap;
> > +
> > +       if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
> > +               mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
> 
> I'd add a "return;" here and de-indent the rest of the function (that
> should help to make it more readable).

OK, I will add the return and de-indent the rest of the function.

> 
> > +       } else {
> > +               len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> 
> You make an assumption below that len is always a power of 2. I wonder
> if we want to validate that with some BUG/WARN call.
> 
> Also, I assume VFF_LEN is constant? Maybe we can read it once only in
> the probe function?
> 

No, the value of VFF_LEN isn't constant.

> > +
> > +               while (((left = mtk_uart_apdma_chan_read(c,
> > +                                       VFF_LEFT_SIZE)) > 0U)
> > +                               && (c->desc->avail_len != 0U)) {
> 
> Why do we need a while loop here? LEFT_SIZE will always be smaller
> than LEN, right? Shouldn't we load as much as we can, then let an
> interrupt happen and reload the FIFO at that stage? Or does this help
> gain a bit of extra performance?

I will remove the while loop and use an 'if' instead.
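Something along these lines, as a rough sketch (same helpers and
registers as in this patch; the exact code may still change in the
next version):

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
        unsigned int len, send, left, wpt, wrap;

        left = mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE);
        if (left == 0U) {
                mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
                return;
        }

        len = mtk_uart_apdma_chan_read(c, VFF_LEN);
        send = min_t(unsigned int, left, c->desc->avail_len);
        wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
        wrap = wpt & MTK_UART_APDMA_RING_WRAP ? 0U : MTK_UART_APDMA_RING_WRAP;

        /* advance the write pointer, toggling the wrap bit on wrap-around */
        if ((wpt & (len - 1U)) + send < len)
                mtk_uart_apdma_chan_write(c, VFF_WPT, wpt + send);
        else
                mtk_uart_apdma_chan_write(c, VFF_WPT,
                                          ((wpt + send) & (len - 1U)) | wrap);
        c->desc->avail_len -= send;

        /* re-arm the interrupt and flush what was queued */
        mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
        if (mtk_uart_apdma_chan_read(c, VFF_FLUSH) == 0U)
                mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_B);
}
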
> 
> > +                       send = min_t(unsigned int, left, c->desc->avail_len);
> > +                       wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > +                       wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> 
> wrap = wrp ^ MTK_UART_APDMA_RING_WRAP ?
> 

wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
I have tested it and this form works.

> > +                                       0U : MTK_UART_APDMA_RING_WRAP;
> > +
> > +                       if ((wpt & (len - 1U)) + send < len)
> > +                               mtk_uart_apdma_chan_write(c,
> > +                                               VFF_WPT, wpt + send);
> > +                       else
> > +                               mtk_uart_apdma_chan_write(c, VFF_WPT,
> > +                                                  ((wpt + send) & (len - 1U))
> > +                                                  | wrap);
> > +
> > +                       c->desc->avail_len -= send;
> > +               }
> > +
> > +               if (txcount != c->desc->avail_len) {
> 
> We know mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) > 0 at least once,
> so this test should always be true.
> 
> Unless we have c->desc->avail_len == 0, but then we won't even call
> this function... So this is always true.
> 

Yes, you are right. I will remove it.

> > +                       mtk_uart_apdma_chan_write(c,
> > +                                       VFF_INT_EN, VFF_TX_INT_EN_B);
> > +                       if (mtk_uart_apdma_chan_read(c,
> > +                                               VFF_FLUSH) == 0U)
> > +                               mtk_uart_apdma_chan_write(c,
> > +                                               VFF_FLUSH, VFF_FLUSH_B);
> > +               }
> > +       }
> > +}
> > +
> > +static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
> > +{
> > +       struct mtk_uart_apdma_desc *d = c->desc;
> > +       unsigned int rx_len, wg, rg, count;
> > +
> > +       if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
> > +               return;
> > +
> > +       if (d && vchan_next_desc(&c->vc)) {
> > +               rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > +               rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
> > +               wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > +               count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
> > +                               ((wg & MTK_UART_APDMA_RING_SIZE) +
> > +                               rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
> > +                               ((wg & MTK_UART_APDMA_RING_SIZE) -
> > +                               (rg & MTK_UART_APDMA_RING_SIZE));
> 
> This is hard to read, please use an if/else.
> 

OK, I will change it to an if/else.
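Roughly (a sketch only, keeping the same arithmetic):

        rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
        rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
        wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
        /* writer has wrapped past the reader */
        if ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP)
                count = (wg & MTK_UART_APDMA_RING_SIZE) + rx_len -
                        (rg & MTK_UART_APDMA_RING_SIZE);
        else
                count = (wg & MTK_UART_APDMA_RING_SIZE) -
                        (rg & MTK_UART_APDMA_RING_SIZE);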

> > +
> > +               c->rx_status = count;
> > +               mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
> > +
> > +               list_del(&d->vd.node);
> > +               vchan_cookie_complete(&d->vd);
> > +       }
> > +}
> > +
> > +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
> > +{
> > +       struct dma_chan *chan = (struct dma_chan *)dev_id;
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       struct mtk_uart_apdma_desc *d;
> > +       unsigned long flags;
> > +
> > +       spin_lock_irqsave(&c->vc.lock, flags);
> > +       switch (c->cfg.direction) {
> > +       case DMA_DEV_TO_MEM:
> 
> You use an if/else below in a similar case, please be consistent (IMHO
> if/else is a bit more compact, so I'd use that here).
> 
Sean Wang already reviewed this part; see the link:
http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html

> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > +               mtk_uart_apdma_start_rx(c);
> > +               break;
> > +       case DMA_MEM_TO_DEV:
> > +               d = c->desc;
> > +
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > +
> > +               if (d->avail_len != 0U) {
> > +                       mtk_uart_apdma_start_tx(c);
> > +               } else {
> > +                       list_del(&d->vd.node);
> > +                       vchan_cookie_complete(&d->vd);
> > +               }
> > +               break;
> > +       default:
> > +               break;
> > +       }
> > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > +
> > +       return IRQ_HANDLED;
> > +}
> > +
> > +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       u32 status;
> > +       int ret = -EBUSY;
> > +
> > +       pm_runtime_get_sync(mtkd->ddev.dev);
> > +
> > +       mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
> > +       mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
> > +       mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
> > +       mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
> > +
> > +       ret = readx_poll_timeout(readl,
> > +                                c->base + VFF_EN,
> > +                                status, status == 0, 10, 100);
> > +       if (ret) {
> > +               dev_err(c->vc.chan.device->dev,
> > +                               "dma reset: fail, timeout\n");
> > +               goto exit;
> > +       }
> > +
> > +       if (!c->requested) {
> > +               c->requested = true;
> > +               ret = request_irq(mtkd->dma_irq[chan->chan_id],
> > +                                 mtk_uart_apdma_irq_handler,
> > +                                 IRQF_TRIGGER_NONE,
> > +                                 KBUILD_MODNAME, chan);
> > +               if (ret < 0) {
> > +                       dev_err(chan->device->dev, "Can't request dma IRQ\n");
> > +                       return -EINVAL;
> > +               }
> > +       }
> > +
> > +       if (mtkd->support_33bits)
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
> > +
> > +exit:
> > +       return ret;
> > +}
> > +
> > +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +
> > +       if (c->requested) {
> > +               c->requested = false;
> > +               free_irq(mtkd->dma_irq[chan->chan_id], chan);
> > +       }
> > +
> > +       tasklet_kill(&c->vc.task);
> > +
> > +       vchan_free_chan_resources(&c->vc);
> > +
> > +       pm_runtime_put_sync(mtkd->ddev.dev);
> > +}
> > +
> > +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
> > +                                        dma_cookie_t cookie,
> > +                                        struct dma_tx_state *txstate)
> > +{
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       enum dma_status ret;
> > +       unsigned long flags;
> > +
> > +       if (!txstate)
> > +               return DMA_ERROR;
> > +
> > +       ret = dma_cookie_status(chan, cookie, txstate);
> > +       spin_lock_irqsave(&c->vc.lock, flags);
> > +       if (ret == DMA_IN_PROGRESS) {
> > +               c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
> > +                            & MTK_UART_APDMA_RING_SIZE;
> > +               dma_set_residue(txstate, c->rx_status);
> > +       } else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
> > +               dma_set_residue(txstate, c->rx_status);
> > +       } else {
> > +               dma_set_residue(txstate, 0);
> > +       }
> > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > +
> > +       return ret;
> > +}
> > +
> > +/*
> > + * dmaengine_prep_slave_single will call the function. and sglen is 1.
> > + * 8250 uart using one ring buffer, and deal with one sg.
> > + */
> > +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
> > +       (struct dma_chan *chan, struct scatterlist *sgl,
> > +       unsigned int sglen,     enum dma_transfer_direction dir,
> > +       unsigned long tx_flags, void *context)
> > +{
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       struct mtk_uart_apdma_desc *d;
> > +
> > +       if ((dir != DMA_DEV_TO_MEM) &&
> > +               (dir != DMA_MEM_TO_DEV)) {
> > +               dev_err(chan->device->dev, "bad direction\n");
> > +               return NULL;
> > +       }
> > +
> > +       /* Now allocate and setup the descriptor */
> > +       d = kzalloc(sizeof(*d), GFP_ATOMIC);
> > +       if (!d)
> > +               return NULL;
> > +
> > +       /* sglen is 1 */
> > +       d->avail_len = sg_dma_len(sgl);
> > +
> > +       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
> > +}
> > +
> > +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
> > +{
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       struct virt_dma_desc *vd;
> > +       unsigned long flags;
> > +
> > +       spin_lock_irqsave(&c->vc.lock, flags);
> > +       if (c->cfg.direction == DMA_DEV_TO_MEM) {
> > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > +                       vd = vchan_next_desc(&c->vc);
> > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > +                       mtk_uart_apdma_start_rx(c);
> > +               }
> > +       } else if (c->cfg.direction == DMA_MEM_TO_DEV) {
> > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > +                       vd = vchan_next_desc(&c->vc);
> > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > +                       mtk_uart_apdma_start_tx(c);
> > +               }
> > +       }
> > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > +}
> > +
> > +static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
> > +                               struct dma_slave_config *cfg)
> > +{
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       struct mtk_uart_apdmadev *mtkd =
> > +                               to_mtk_uart_apdma_dev(c->vc.chan.device);
> > +
> > +       c->cfg = *cfg;
> > +
> > +       if (cfg->direction == DMA_DEV_TO_MEM) {
> > +               unsigned int rx_len = cfg->src_addr_width * 1024;
> > +
> > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
> > +               mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
> > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_EN, VFF_RX_INT_EN0_B
> > +                               | VFF_RX_INT_EN1_B);
> > +               mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > +       } else if (cfg->direction == DMA_MEM_TO_DEV)    {
> > +               unsigned int tx_len = cfg->dst_addr_width * 1024;
> > +
> > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
> > +               mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
> > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
> > +               mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > +       }
> > +
> > +       if (mtkd->support_33bits)
> > +               mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
> > +
> > +       if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
> > +               dev_err(chan->device->dev,
> > +                       "config dma dir[%d] fail\n", cfg->direction);
> > +               return -EINVAL;
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> > +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
> > +{
> > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > +       unsigned long flags;
> > +       u32 status;
> > +       int ret;
> > +
> > +       spin_lock_irqsave(&c->vc.lock, flags);
> > +
> > +       mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
> > +       /* Wait for flush */
> > +       ret = readx_poll_timeout(readl,
> > +                                c->base + VFF_FLUSH,
> > +                                status,
> > +                                (status & VFF_FLUSH_B) != VFF_FLUSH_B,
> > +                                10, 100);
> > +       if (ret)
> > +               dev_err(c->vc.chan.device->dev,
> > +                       "dma stop: polling FLUSH fail, DEBUG=0x%x\n",
> > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > +
> > +       /*set stop as 1 -> wait until en is 0 -> set stop as 0*/
> > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
> > +       ret = readx_poll_timeout(readl,
> > +                                c->base + VFF_EN,
> > +                                status, status == 0, 10, 100);
> > +       if (ret)
> > +               dev_err(c->vc.chan.device->dev,
> > +                       "dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
> > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > +
> > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
> > +       mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
> > +
> > +       switch (c->cfg.direction) {
> > +       case DMA_DEV_TO_MEM:
> 
> Ditto, if/else?
> 
Ditto; see:
http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html

> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > +               break;
> > +       case DMA_MEM_TO_DEV:
> > +               mtk_uart_apdma_chan_write(c,
> > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > +               break;
> > +       default:
> > +               break;
> > +       }
> > +
> > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > +
> > +       return 0;
> > +}
> > +
> > +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> > +{
> > +       /* just for check caps pass */
> > +       return 0;
> > +}
> > +
> > +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> > +{
> > +       /* just for check caps pass */
> > +       return 0;
> > +}
> > +
> > +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> > +{
> > +       while (list_empty(&mtkd->ddev.channels) == 0) {
> > +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> > +                       struct mtk_chan, vc.chan.device_node);
> > +
> > +               list_del(&c->vc.chan.device_node);
> > +               tasklet_kill(&c->vc.task);
> > +       }
> > +}
> > +
> > +static const struct of_device_id mtk_uart_apdma_match[] = {
> > +       { .compatible = "mediatek,mt6577-uart-dma", },
> > +       { /* sentinel */ },
> > +};
> > +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> > +
> > +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd;
> > +       struct resource *res;
> > +       struct mtk_chan *c;
> > +       unsigned int i;
> > +       int rc;
> > +
> > +       mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
> > +       if (!mtkd)
> > +               return -ENOMEM;
> > +
> > +       mtkd->clk = devm_clk_get(&pdev->dev, NULL);
> > +       if (IS_ERR(mtkd->clk)) {
> > +               dev_err(&pdev->dev, "No clock specified\n");
> > +               rc = PTR_ERR(mtkd->clk);
> > +               goto err_no_dma;
> > +       }
> > +
> > +       if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
> > +               dev_info(&pdev->dev, "Support dma 33bits\n");
> 
> Drop this, a bit verbose.
> 
> > +               mtkd->support_33bits = true;
> > +       }
> > +
> > +       rc = dma_set_mask_and_coherent(&pdev->dev,
> > +                               DMA_BIT_MASK(32 | mtkd->support_33bits));
> > +       if (rc)
> > +               goto err_no_dma;
> > +
> > +       dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
> > +       mtkd->ddev.device_alloc_chan_resources =
> > +                               mtk_uart_apdma_alloc_chan_resources;
> > +       mtkd->ddev.device_free_chan_resources =
> > +                               mtk_uart_apdma_free_chan_resources;
> > +       mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
> > +       mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
> > +       mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
> > +       mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
> > +       mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
> > +       mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
> > +       mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
> > +       mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > +       mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > +       mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> > +       mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> > +       mtkd->ddev.dev = &pdev->dev;
> > +       INIT_LIST_HEAD(&mtkd->ddev.channels);
> > +
> > +       for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
> > +               c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
> > +               if (!c) {
> > +                       rc = -ENODEV;
> > +                       goto err_no_dma;
> > +               }
> > +
> > +               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
> > +               if (!res) {
> > +                       rc = -ENODEV;
> > +                       goto err_no_dma;
> > +               }
> > +
> > +               c->base = devm_ioremap_resource(&pdev->dev, res);
> > +               if (IS_ERR(c->base)) {
> > +                       rc = PTR_ERR(c->base);
> > +                       goto err_no_dma;
> > +               }
> > +               c->requested = false;
> > +               c->vc.desc_free = mtk_uart_apdma_desc_free;
> > +               vchan_init(&c->vc, &mtkd->ddev);
> > +
> > +               mtkd->dma_irq[i] = platform_get_irq(pdev, i);
> > +               if ((int)mtkd->dma_irq[i] < 0) {
> > +                       dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
> > +                       rc = -EINVAL;
> > +                       goto err_no_dma;
> > +               }
> > +       }
> > +
> > +       pm_runtime_enable(&pdev->dev);
> > +       pm_runtime_set_active(&pdev->dev);
> > +
> > +       rc = dma_async_device_register(&mtkd->ddev);
> > +       if (rc)
> > +               goto rpm_disable;
> > +
> > +       platform_set_drvdata(pdev, mtkd);
> > +
> > +       if (pdev->dev.of_node) {
> > +               /* Device-tree DMA controller registration */
> > +               rc = of_dma_controller_register(pdev->dev.of_node,
> > +                                               of_dma_xlate_by_chan_id,
> > +                                               mtkd);
> > +               if (rc)
> > +                       goto dma_remove;
> > +       }
> > +
> > +       return rc;
> > +
> > +dma_remove:
> > +       dma_async_device_unregister(&mtkd->ddev);
> > +rpm_disable:
> > +       pm_runtime_disable(&pdev->dev);
> > +err_no_dma:
> > +       mtk_uart_apdma_free(mtkd);
> > +       return rc;
> > +}
> > +
> > +static int mtk_uart_apdma_remove(struct platform_device *pdev)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
> > +
> > +       if (pdev->dev.of_node)
> > +               of_dma_controller_free(pdev->dev.of_node);
> > +
> > +       pm_runtime_disable(&pdev->dev);
> > +       pm_runtime_put_noidle(&pdev->dev);
> > +
> > +       dma_async_device_unregister(&mtkd->ddev);
> > +       mtk_uart_apdma_free(mtkd);
> > +
> > +       return 0;
> > +}
> > +
> > +#ifdef CONFIG_PM_SLEEP
> > +static int mtk_uart_apdma_suspend(struct device *dev)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > +
> > +       if (!pm_runtime_suspended(dev))
> > +               clk_disable_unprepare(mtkd->clk);
> > +
> > +       return 0;
> > +}
> > +
> > +static int mtk_uart_apdma_resume(struct device *dev)
> > +{
> > +       int ret;
> > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > +
> > +       if (!pm_runtime_suspended(dev)) {
> > +               ret = clk_prepare_enable(mtkd->clk);
> > +               if (ret)
> > +                       return ret;
> > +       }
> > +
> > +       return 0;
> > +}
> > +#endif /* CONFIG_PM_SLEEP */
> > +
> > +#ifdef CONFIG_PM
> > +static int mtk_uart_apdma_runtime_suspend(struct device *dev)
> > +{
> > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > +
> > +       clk_disable_unprepare(mtkd->clk);
> > +
> > +       return 0;
> > +}
> > +
> > +static int mtk_uart_apdma_runtime_resume(struct device *dev)
> > +{
> > +       int ret;
> > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > +
> > +       ret = clk_prepare_enable(mtkd->clk);
> > +       if (ret)
> > +               return ret;
> > +
> > +       return 0;
> > +}
> > +#endif /* CONFIG_PM */
> > +
> > +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
> > +       SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
> > +       SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
> > +                          mtk_uart_apdma_runtime_resume, NULL)
> > +};
> > +
> > +static struct platform_driver mtk_uart_apdma_driver = {
> > +       .probe  = mtk_uart_apdma_probe,
> > +       .remove = mtk_uart_apdma_remove,
> > +       .driver = {
> > +               .name           = KBUILD_MODNAME,
> > +               .pm             = &mtk_uart_apdma_pm_ops,
> > +               .of_match_table = of_match_ptr(mtk_uart_apdma_match),
> > +       },
> > +};
> > +
> > +module_platform_driver(mtk_uart_apdma_driver);
> > +
> > +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
> > +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
> > +MODULE_LICENSE("GPL v2");
> > +
> > diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
> > index 27bac0b..1a523c87 100644
> > --- a/drivers/dma/mediatek/Kconfig
> > +++ b/drivers/dma/mediatek/Kconfig
> > @@ -1,4 +1,15 @@
> >
> > +config DMA_MTK_UART
> > +       tristate "MediaTek SoCs APDMA support for UART"
> > +       depends on OF && SERIAL_8250_MT6577
> > +       select DMA_ENGINE
> > +       select DMA_VIRTUAL_CHANNELS
> > +       help
> > +         Support for the UART DMA engine found on MediaTek SoCs.
> > +         When SERIAL_8250_MT6577 is enabled and you want to use DMA
> > +         for the UART, enable this option. The DMA engine can only
> > +         be used with MediaTek SoCs.
> > +
> >  config MTK_HSDMA
> >         tristate "MediaTek High-Speed DMA controller support"
> >         depends on ARCH_MEDIATEK || COMPILE_TEST
> > diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
> > index 6e778f8..2f2efd9 100644
> > --- a/drivers/dma/mediatek/Makefile
> > +++ b/drivers/dma/mediatek/Makefile
> > @@ -1 +1,2 @@
> > +obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
> >  obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
> > --
> > 1.7.9.5
> >



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-25 12:06     ` Long Cheng
@ 2018-12-26  0:05       ` Nicolas Boichat
  2018-12-26  6:35         ` Long Cheng
  0 siblings, 1 reply; 9+ messages in thread
From: Nicolas Boichat @ 2018-12-26  0:05 UTC (permalink / raw)
  To: Long Cheng
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen

On Tue, Dec 25, 2018 at 8:06 PM Long Cheng <long.cheng@mediatek.com> wrote:
>
> Thanks for your comments.
>
> On Tue, 2018-12-25 at 15:16 +0800, Nicolas Boichat wrote:
> > Not a full review, a few comments below.
> >
> > Thanks,
> >
>
> Would you be able to do a full review of this version of the patch? Then I
> can address all of the comments in the next version. Thanks.

Added a few more comments ,-)

> > On Tue, Dec 25, 2018 at 9:27 AM Long Cheng <long.cheng@mediatek.com> wrote:
> > >
> > > In DMA engine framework, add 8250 uart dma to support MediaTek uart.
> > > If MediaTek uart enabled(SERIAL_8250_MT6577), and want to improve
> > > the performance, can enable the function.
> > >
> > > Signed-off-by: Long Cheng <long.cheng@mediatek.com>
> > > ---
> > >  drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
> > >  drivers/dma/mediatek/Kconfig        |   11 +
> > >  drivers/dma/mediatek/Makefile       |    1 +
> > >  3 files changed, 706 insertions(+)
> > >  create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c
> > >
> > > diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
> > > new file mode 100644
> > > index 0000000..c4090f2
> > > --- /dev/null
> > > +++ b/drivers/dma/mediatek/8250_mtk_dma.c
> > > @@ -0,0 +1,694 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/*
> > > + * MediaTek 8250 DMA driver.
> > > + *
> > > + * Copyright (c) 2018 MediaTek Inc.
> > > + * Author: Long Cheng <long.cheng@mediatek.com>
> > > + */
> > > +
> > > +#include <linux/clk.h>
> > > +#include <linux/dmaengine.h>
> > > +#include <linux/dma-mapping.h>
> > > +#include <linux/err.h>
> > > +#include <linux/init.h>
> > > +#include <linux/interrupt.h>
> > > +#include <linux/list.h>
> > > +#include <linux/kernel.h>
> > > +#include <linux/module.h>
> > > +#include <linux/of_dma.h>
> > > +#include <linux/of_device.h>
> > > +#include <linux/platform_device.h>
> > > +#include <linux/slab.h>
> > > +#include <linux/spinlock.h>
> > > +#include <linux/pm_runtime.h>
> > > +#include <linux/iopoll.h>
> >
> > Alphabetical order.
>
> OK, I will sort them alphabetically.
>
> >
> > > +
> > > +#include "../virt-dma.h"
> > > +
> > > +#define MTK_UART_APDMA_CHANNELS                (CONFIG_SERIAL_8250_NR_UARTS * 2)
> > > +
> > > +#define VFF_EN_B               BIT(0)
> > > +#define VFF_STOP_B             BIT(0)
> > > +#define VFF_FLUSH_B            BIT(0)
> > > +#define VFF_4G_SUPPORT_B       BIT(0)
> > > +#define VFF_RX_INT_EN0_B       BIT(0)  /*rx valid size >=  vff thre*/
> > > +#define VFF_RX_INT_EN1_B       BIT(1)
> > > +#define VFF_TX_INT_EN_B                BIT(0)  /*tx left size >= vff thre*/
> > > +#define VFF_WARM_RST_B         BIT(0)
> > > +#define VFF_RX_INT_FLAG_CLR_B  (BIT(0) | BIT(1))
> > > +#define VFF_TX_INT_FLAG_CLR_B  0
> > > +#define VFF_STOP_CLR_B         0
> > > +#define VFF_FLUSH_CLR_B                0
> > > +#define VFF_INT_EN_CLR_B       0
> > > +#define VFF_4G_SUPPORT_CLR_B   0
> > > +
> > > +/* interrupt trigger level for tx */
> > > +#define VFF_TX_THRE(n)         ((n) * 7 / 8)
> > > +/* interrupt trigger level for rx */
> > > +#define VFF_RX_THRE(n)         ((n) * 3 / 4)
> > > +
> > > +#define MTK_UART_APDMA_RING_SIZE       0xffffU
> > > +/* invert this bit when wrap ring head again*/
> > > +#define MTK_UART_APDMA_RING_WRAP       0x10000U
> > > +
> > > +#define VFF_INT_FLAG           0x00
> > > +#define VFF_INT_EN             0x04
> > > +#define VFF_EN                 0x08
> > > +#define VFF_RST                        0x0c
> > > +#define VFF_STOP               0x10
> > > +#define VFF_FLUSH              0x14
> > > +#define VFF_ADDR               0x1c
> > > +#define VFF_LEN                        0x24
> > > +#define VFF_THRE               0x28
> > > +#define VFF_WPT                        0x2c
> > > +#define VFF_RPT                        0x30
> > > +/*TX: the buffer size HW can read. RX: the buffer size SW can read.*/
> > > +#define VFF_VALID_SIZE         0x3c
> > > +/*TX: the buffer size SW can write. RX: the buffer size HW can write.*/
> > > +#define VFF_LEFT_SIZE          0x40
> > > +#define VFF_DEBUG_STATUS       0x50
> > > +#define VFF_4G_SUPPORT         0x54
> > > +
> > > +struct mtk_uart_apdmadev {
> > > +       struct dma_device ddev;
> > > +       struct clk *clk;
> > > +       bool support_33bits;
> > > +       unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
> > > +};
> > > +
> > > +struct mtk_uart_apdma_desc {
> > > +       struct virt_dma_desc vd;
> > > +
> > > +       unsigned int avail_len;
> > > +};
> > > +
> > > +struct mtk_chan {
> > > +       struct virt_dma_chan vc;
> > > +       struct dma_slave_config cfg;
> > > +       void __iomem *base;
> > > +       struct mtk_uart_apdma_desc *desc;
> > > +
> > > +       bool requested;
> > > +
> > > +       unsigned int rx_status;
> > > +};
> > > +
> > > +static inline struct mtk_uart_apdmadev *
> > > +to_mtk_uart_apdma_dev(struct dma_device *d)
> > > +{
> > > +       return container_of(d, struct mtk_uart_apdmadev, ddev);
> > > +}
> > > +
> > > +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
> > > +{
> > > +       return container_of(c, struct mtk_chan, vc.chan);
> > > +}
> > > +
> > > +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
> > > +       (struct dma_async_tx_descriptor *t)
> > > +{
> > > +       return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
> > > +}
> > > +
> > > +static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
> > > +                              unsigned int reg, unsigned int val)
> > > +{
> > > +       writel(val, c->base + reg);
> > > +}
> > > +
> > > +static unsigned int
> > > +mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
> > > +{
> > > +       return readl(c->base + reg);
> > > +}
> > > +
> > > +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
> > > +{
> > > +       struct dma_chan *chan = vd->tx.chan;
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +
> > > +       kfree(c->desc);
> > > +       c->desc = NULL;
> >
> > Why? I'm afraid this may mask double-free.
> >
>
> OK, I will remove it.
>
> > > +}
> > > +
> > > +static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
> > > +{
> > > +       unsigned int txcount = c->desc->avail_len;
> >
> > Variable name is confusing... I'd rather have a boolean `transmitted`
> > (or something), set to 0 here, and set to 1 in the loop (but again,
> > maybe this is not necessary at all, see below).
>
> OK, I will rework the function and remove it.
>
> >
> > > +       unsigned int len, send, left, wpt, wrap;
> > > +
> > > +       if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
> > > +               mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
> >
> > I'd add a "return;" here and de-indent the rest of the function (that
> > should help to make it more readable).
>
> OK, I will add the return and de-indent the rest of the function.
>
> >
> > > +       } else {
> > > +               len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> >
> > You make an assumption below that len is always a power of 2. I wonder
> > if we want to validate that with some BUG/WARN call.
> >
> > Also, I assume VFF_LEN is constant? Maybe we can read it once only in
> > the probe function?
> >
>
> No, the value of VFF_LEN isn't constant.
>
> > > +
> > > +               while (((left = mtk_uart_apdma_chan_read(c,
> > > +                                       VFF_LEFT_SIZE)) > 0U)
> > > +                               && (c->desc->avail_len != 0U)) {
> >
> > Why do we need a while loop here? LEFT_SIZE will always be smaller
> > than LEN, right? Shouldn't we load as much as we can, then let an
> > interrupt happen and reload the FIFO at that stage? Or does this help
> > gain a bit of extra performance?
>
> I will remove the while loop and use an 'if' instead.
> >
> > > +                       send = min_t(unsigned int, left, c->desc->avail_len);
> > > +                       wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > +                       wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> >
> > wrap = wrp ^ MTK_UART_APDMA_RING_WRAP ?
> >
>
> wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> I have tested it and this form works.

Sorry, I got it wrong (and my question mark was confusing); what I meant is this:

wrap = (wpt ^ MTK_UART_APDMA_RING_WRAP) & MTK_UART_APDMA_RING_WRAP;

But then it's only marginally shorter than your solution, so maybe keep
yours; it's a little simpler to understand.
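(FWIW the two forms should be equivalent -- each one just computes the
inverse of the current wrap bit:

        wrap = wpt & MTK_UART_APDMA_RING_WRAP ? 0U : MTK_UART_APDMA_RING_WRAP;
        wrap = (wpt ^ MTK_UART_APDMA_RING_WRAP) & MTK_UART_APDMA_RING_WRAP;

so it really is just a readability question.)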

> > > +                                       0U : MTK_UART_APDMA_RING_WRAP;
> > > +
> > > +                       if ((wpt & (len - 1U)) + send < len)

Is it possible for wpt > len? If not, then wpt + send < len should be enough.

Also, wpt + send is used a few times now, so maybe put it in a separate variable?

Something like this might be nicer:

next_wpt = wpt + send;
if ((wpt & (len - 1U)) + send >= len) {
   next_wpt = next_wpt & (len - 1U);
   if (!(wpt & MTK_UART_APDMA_RING_WRAP))
      next_wpt |= MTK_UART_APDMA_RING_WRAP;
}
mtk_uart_apdma_chan_write(c, VFF_WPT, next_wpt);

Or, if the assumption that wpt < len holds:
if (next_wpt >= len) {
   next_wpt = next_wpt - len;
...

> > > +                               mtk_uart_apdma_chan_write(c,
> > > +                                               VFF_WPT, wpt + send);
> > > +                       else
> > > +                               mtk_uart_apdma_chan_write(c, VFF_WPT,
> > > +                                                  ((wpt + send) & (len - 1U))
> > > +                                                  | wrap);
> > > +
> > > +                       c->desc->avail_len -= send;
> > > +               }
> > > +
> > > +               if (txcount != c->desc->avail_len) {
> >
> > We know mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) > 0 at least once,
> > so this test should always be true.
> >
> > Unless we have c->desc->avail_len == 0, but then we won't even call
> > this function... So this is always true.
> >
>
> Yes, you are right. I will remove it.
>
> > > +                       mtk_uart_apdma_chan_write(c,
> > > +                                       VFF_INT_EN, VFF_TX_INT_EN_B);
> > > +                       if (mtk_uart_apdma_chan_read(c,
> > > +                                               VFF_FLUSH) == 0U)
> > > +                               mtk_uart_apdma_chan_write(c,
> > > +                                               VFF_FLUSH, VFF_FLUSH_B);
> > > +               }
> > > +       }
> > > +}
> > > +
> > > +static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
> > > +{
> > > +       struct mtk_uart_apdma_desc *d = c->desc;
> > > +       unsigned int rx_len, wg, rg, count;
> > > +
> > > +       if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
> > > +               return;
> > > +
> > > +       if (d && vchan_next_desc(&c->vc)) {

I'd negate the test to de-indent the rest of the code:
if (!d || !vchan_next_desc(&c->vc))
   return;

> > > +               rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > > +               rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
> > > +               wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > +               count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
> > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) +
> > > +                               rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
> > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) -
> > > +                               (rg & MTK_UART_APDMA_RING_SIZE));
> >
> > This is hard to read, please use an if/else.
> >
>
> OK, I will change it to an if/else.
>
> > > +
> > > +               c->rx_status = count;
> > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
> > > +
> > > +               list_del(&d->vd.node);
> > > +               vchan_cookie_complete(&d->vd);
> > > +       }
> > > +}
> > > +
> > > +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
> > > +{
> > > +       struct dma_chan *chan = (struct dma_chan *)dev_id;
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       struct mtk_uart_apdma_desc *d;
> > > +       unsigned long flags;
> > > +
> > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > +       switch (c->cfg.direction) {
> > > +       case DMA_DEV_TO_MEM:
> >
> > You use an if/else below in a similar case, please be consistent (IMHO
> > if/else is a bit more compact, so I'd use that here).
> >
> Sean Wang already reviewed this part; see the link:
> http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html
>
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > +               mtk_uart_apdma_start_rx(c);
> > > +               break;
> > > +       case DMA_MEM_TO_DEV:
> > > +               d = c->desc;
> > > +
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > +
> > > +               if (d->avail_len != 0U) {
> > > +                       mtk_uart_apdma_start_tx(c);
> > > +               } else {
> > > +                       list_del(&d->vd.node);
> > > +                       vchan_cookie_complete(&d->vd);
> > > +               }
> > > +               break;
> > > +       default:
> > > +               break;
> > > +       }
> > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > +
> > > +       return IRQ_HANDLED;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       u32 status;
> > > +       int ret = -EBUSY;

No need to init ret.

> > > +
> > > +       pm_runtime_get_sync(mtkd->ddev.dev);
> > > +
> > > +       mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
> > > +       mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
> > > +       mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
> > > +       mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
> > > +
> > > +       ret = readx_poll_timeout(readl,
> > > +                                c->base + VFF_EN,
> > > +                                status, status == 0, 10, 100);
> > > +       if (ret) {
> > > +               dev_err(c->vc.chan.device->dev,
> > > +                               "dma reset: fail, timeout\n");
> > > +               goto exit;

Just return ret, since you don't do any cleanup in exit:.
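i.e. something like (sketch):

        if (ret) {
                dev_err(c->vc.chan.device->dev, "dma reset: fail, timeout\n");
                return ret;
        }

and the exit: label can then go away.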

> > > +       }
> > > +
> > > +       if (!c->requested) {
> > > +               c->requested = true;
> > > +               ret = request_irq(mtkd->dma_irq[chan->chan_id],
> > > +                                 mtk_uart_apdma_irq_handler,
> > > +                                 IRQF_TRIGGER_NONE,
> > > +                                 KBUILD_MODNAME, chan);
> > > +               if (ret < 0) {
> > > +                       dev_err(chan->device->dev, "Can't request dma IRQ\n");
> > > +                       return -EINVAL;
> > > +               }
> > > +       }
> > > +
> > > +       if (mtkd->support_33bits)
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
> > > +
> > > +exit:
> > > +       return ret;
> > > +}
> > > +
> > > +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +
> > > +       if (c->requested) {
> > > +               c->requested = false;
> > > +               free_irq(mtkd->dma_irq[chan->chan_id], chan);
> > > +       }
> > > +
> > > +       tasklet_kill(&c->vc.task);
> > > +
> > > +       vchan_free_chan_resources(&c->vc);
> > > +
> > > +       pm_runtime_put_sync(mtkd->ddev.dev);
> > > +}
> > > +
> > > +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
> > > +                                        dma_cookie_t cookie,
> > > +                                        struct dma_tx_state *txstate)
> > > +{
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       enum dma_status ret;
> > > +       unsigned long flags;
> > > +
> > > +       if (!txstate)
> > > +               return DMA_ERROR;
> > > +
> > > +       ret = dma_cookie_status(chan, cookie, txstate);
> > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > +       if (ret == DMA_IN_PROGRESS) {
> > > +               c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
> > > +                            & MTK_UART_APDMA_RING_SIZE;
> > > +               dma_set_residue(txstate, c->rx_status);
> > > +       } else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
> > > +               dma_set_residue(txstate, c->rx_status);
> > > +       } else {
> > > +               dma_set_residue(txstate, 0);
> > > +       }
> > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > +
> > > +       return ret;
> > > +}
> > > +
> > > +/*
> > > + * dmaengine_prep_slave_single will call the function. and sglen is 1.
> > > + * 8250 uart using one ring buffer, and deal with one sg.
> > > + */
> > > +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
> > > +       (struct dma_chan *chan, struct scatterlist *sgl,
> > > +       unsigned int sglen,     enum dma_transfer_direction dir,

Too many spaces (or a tab?) between `sglen,` and `enum`

> > > +       unsigned long tx_flags, void *context)
> > > +{
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       struct mtk_uart_apdma_desc *d;
> > > +
> > > +       if ((dir != DMA_DEV_TO_MEM) &&
> > > +               (dir != DMA_MEM_TO_DEV)) {
> > > +               dev_err(chan->device->dev, "bad direction\n");
> > > +               return NULL;
> > > +       }
> > > +
> > > +       /* Now allocate and setup the descriptor */
> > > +       d = kzalloc(sizeof(*d), GFP_ATOMIC);
> > > +       if (!d)
> > > +               return NULL;
> > > +
> > > +       /* sglen is 1 */
> > > +       d->avail_len = sg_dma_len(sgl);
> > > +
> > > +       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
> > > +}
> > > +
> > > +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
> > > +{
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       struct virt_dma_desc *vd;
> > > +       unsigned long flags;
> > > +
> > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > +       if (c->cfg.direction == DMA_DEV_TO_MEM) {
> > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > +                       vd = vchan_next_desc(&c->vc);
> > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > +                       mtk_uart_apdma_start_rx(c);
> > > +               }
> > > +       } else if (c->cfg.direction == DMA_MEM_TO_DEV) {
> > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > +                       vd = vchan_next_desc(&c->vc);
> > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > +                       mtk_uart_apdma_start_tx(c);
> > > +               }
> > > +       }
> > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > +}
> > > +
> > > +static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
> > > +                               struct dma_slave_config *cfg)
> > > +{
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       struct mtk_uart_apdmadev *mtkd =
> > > +                               to_mtk_uart_apdma_dev(c->vc.chan.device);
> > > +
> > > +       c->cfg = *cfg;
> > > +
> > > +       if (cfg->direction == DMA_DEV_TO_MEM) {
> > > +               unsigned int rx_len = cfg->src_addr_width * 1024;
> > > +
> > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
> > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
> > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_EN, VFF_RX_INT_EN0_B
> > > +                               | VFF_RX_INT_EN1_B);
> > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > +       } else if (cfg->direction == DMA_MEM_TO_DEV)    {
> > > +               unsigned int tx_len = cfg->dst_addr_width * 1024;
> > > +
> > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
> > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
> > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
> > > +               mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > +       }
> > > +
> > > +       if (mtkd->support_33bits)
> > > +               mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
> > > +
> > > +       if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
> > > +               dev_err(chan->device->dev,
> > > +                       "config dma dir[%d] fail\n", cfg->direction);
> > > +               return -EINVAL;
> > > +       }
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
> > > +{
> > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > +       unsigned long flags;
> > > +       u32 status;
> > > +       int ret;
> > > +
> > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > +
> > > +       mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
> > > +       /* Wait for flush */
> > > +       ret = readx_poll_timeout(readl,
> > > +                                c->base + VFF_FLUSH,
> > > +                                status,
> > > +                                (status & VFF_FLUSH_B) != VFF_FLUSH_B,
> > > +                                10, 100);
> > > +       if (ret)
> > > +               dev_err(c->vc.chan.device->dev,
> > > +                       "dma stop: polling FLUSH fail, DEBUG=0x%x\n",
> > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > +
> > > +       /*set stop as 1 -> wait until en is 0 -> set stop as 0*/
> > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
> > > +       ret = readx_poll_timeout(readl,
> > > +                                c->base + VFF_EN,
> > > +                                status, status == 0, 10, 100);
> > > +       if (ret)
> > > +               dev_err(c->vc.chan.device->dev,
> > > +                       "dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
> > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > +
> > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
> > > +       mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
> > > +
> > > +       switch (c->cfg.direction) {
> > > +       case DMA_DEV_TO_MEM:
> >
> > Ditto, if/else?
> >
> Ditto; see:
> http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html

Well, I disagree, especially in this case: We turn 4 lines of code into 10...
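Something like this stays at four lines (sketch):

        if (c->cfg.direction == DMA_DEV_TO_MEM)
                mtk_uart_apdma_chan_write(c, VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
        else if (c->cfg.direction == DMA_MEM_TO_DEV)
                mtk_uart_apdma_chan_write(c, VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);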

> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > +               break;
> > > +       case DMA_MEM_TO_DEV:
> > > +               mtk_uart_apdma_chan_write(c,
> > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > +               break;
> > > +       default:
> > > +               break;
> > > +       }
> > > +
> > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> > > +{
> > > +       /* just for check caps pass */
> > > +       return 0;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> > > +{
> > > +       /* just for check caps pass */
> > > +       return 0;
> > > +}

Why do you need these? Seems wrong to advertise device_pause/resume
(and the caps) if we don't actually support that?

> > > +
> > > +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> > > +{
> > > +       while (list_empty(&mtkd->ddev.channels) == 0) {
> > > +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> > > +                       struct mtk_chan, vc.chan.device_node);
> > > +
> > > +               list_del(&c->vc.chan.device_node);
> > > +               tasklet_kill(&c->vc.task);
> > > +       }
> > > +}
> > > +
> > > +static const struct of_device_id mtk_uart_apdma_match[] = {
> > > +       { .compatible = "mediatek,mt6577-uart-dma", },
> > > +       { /* sentinel */ },
> > > +};
> > > +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> > > +
> > > +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd;
> > > +       struct resource *res;
> > > +       struct mtk_chan *c;
> > > +       unsigned int i;
> > > +       int rc;
> > > +
> > > +       mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
> > > +       if (!mtkd)
> > > +               return -ENOMEM;
> > > +
> > > +       mtkd->clk = devm_clk_get(&pdev->dev, NULL);
> > > +       if (IS_ERR(mtkd->clk)) {
> > > +               dev_err(&pdev->dev, "No clock specified\n");
> > > +               rc = PTR_ERR(mtkd->clk);
> > > +               goto err_no_dma;
> > > +       }
> > > +
> > > +       if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
> > > +               dev_info(&pdev->dev, "Support dma 33bits\n");
> >
> > Drop this, a bit verbose.
> >
> > > +               mtkd->support_33bits = true;
> > > +       }
> > > +
> > > +       rc = dma_set_mask_and_coherent(&pdev->dev,
> > > +                               DMA_BIT_MASK(32 | mtkd->support_33bits));
> > > +       if (rc)
> > > +               goto err_no_dma;
> > > +
> > > +       dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
> > > +       mtkd->ddev.device_alloc_chan_resources =
> > > +                               mtk_uart_apdma_alloc_chan_resources;
> > > +       mtkd->ddev.device_free_chan_resources =
> > > +                               mtk_uart_apdma_free_chan_resources;
> > > +       mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
> > > +       mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
> > > +       mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
> > > +       mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
> > > +       mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
> > > +       mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
> > > +       mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
> > > +       mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > +       mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > +       mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> > > +       mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> > > +       mtkd->ddev.dev = &pdev->dev;
> > > +       INIT_LIST_HEAD(&mtkd->ddev.channels);
> > > +
> > > +       for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
> > > +               c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
> > > +               if (!c) {
> > > +                       rc = -ENODEV;
> > > +                       goto err_no_dma;
> > > +               }
> > > +
> > > +               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
> > > +               if (!res) {
> > > +                       rc = -ENODEV;
> > > +                       goto err_no_dma;
> > > +               }
> > > +
> > > +               c->base = devm_ioremap_resource(&pdev->dev, res);
> > > +               if (IS_ERR(c->base)) {
> > > +                       rc = PTR_ERR(c->base);
> > > +                       goto err_no_dma;
> > > +               }
> > > +               c->requested = false;
> > > +               c->vc.desc_free = mtk_uart_apdma_desc_free;
> > > +               vchan_init(&c->vc, &mtkd->ddev);
> > > +
> > > +               mtkd->dma_irq[i] = platform_get_irq(pdev, i);
> > > +               if ((int)mtkd->dma_irq[i] < 0) {
> > > +                       dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
> > > +                       rc = -EINVAL;
> > > +                       goto err_no_dma;
> > > +               }
> > > +       }
> > > +
> > > +       pm_runtime_enable(&pdev->dev);
> > > +       pm_runtime_set_active(&pdev->dev);
> > > +
> > > +       rc = dma_async_device_register(&mtkd->ddev);
> > > +       if (rc)
> > > +               goto rpm_disable;
> > > +
> > > +       platform_set_drvdata(pdev, mtkd);
> > > +
> > > +       if (pdev->dev.of_node) {
> > > +               /* Device-tree DMA controller registration */
> > > +               rc = of_dma_controller_register(pdev->dev.of_node,
> > > +                                               of_dma_xlate_by_chan_id,
> > > +                                               mtkd);
> > > +               if (rc)
> > > +                       goto dma_remove;
> > > +       }
> > > +
> > > +       return rc;
> > > +
> > > +dma_remove:
> > > +       dma_async_device_unregister(&mtkd->ddev);
> > > +rpm_disable:
> > > +       pm_runtime_disable(&pdev->dev);
> > > +err_no_dma:
> > > +       mtk_uart_apdma_free(mtkd);

It's not very correct to call this before
INIT_LIST_HEAD(&mtkd->ddev.channels);

I'd just call `return rc;` directly for all instances before that line.

> > > +       return rc;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_remove(struct platform_device *pdev)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
> > > +
> > > +       if (pdev->dev.of_node)
> > > +               of_dma_controller_free(pdev->dev.of_node);
> > > +
> > > +       pm_runtime_disable(&pdev->dev);
> > > +       pm_runtime_put_noidle(&pdev->dev);
> > > +
> > > +       dma_async_device_unregister(&mtkd->ddev);
> > > +       mtk_uart_apdma_free(mtkd);
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +#ifdef CONFIG_PM_SLEEP
> > > +static int mtk_uart_apdma_suspend(struct device *dev)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > +
> > > +       if (!pm_runtime_suspended(dev))
> > > +               clk_disable_unprepare(mtkd->clk);
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_resume(struct device *dev)
> > > +{
> > > +       int ret;
> > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > +
> > > +       if (!pm_runtime_suspended(dev)) {
> > > +               ret = clk_prepare_enable(mtkd->clk);
> > > +               if (ret)
> > > +                       return ret;
> > > +       }
> > > +
> > > +       return 0;
> > > +}
> > > +#endif /* CONFIG_PM_SLEEP */
> > > +
> > > +#ifdef CONFIG_PM
> > > +static int mtk_uart_apdma_runtime_suspend(struct device *dev)
> > > +{
> > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > +
> > > +       clk_disable_unprepare(mtkd->clk);
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static int mtk_uart_apdma_runtime_resume(struct device *dev)
> > > +{
> > > +       int ret;
> > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > +
> > > +       ret = clk_prepare_enable(mtkd->clk);
> > > +       if (ret)
> > > +               return ret;
> > > +
> > > +       return 0;
> > > +}
> > > +#endif /* CONFIG_PM */
> > > +
> > > +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
> > > +       SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
> > > +       SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
> > > +                          mtk_uart_apdma_runtime_resume, NULL)
> > > +};
> > > +
> > > +static struct platform_driver mtk_uart_apdma_driver = {
> > > +       .probe  = mtk_uart_apdma_probe,
> > > +       .remove = mtk_uart_apdma_remove,
> > > +       .driver = {
> > > +               .name           = KBUILD_MODNAME,
> > > +               .pm             = &mtk_uart_apdma_pm_ops,
> > > +               .of_match_table = of_match_ptr(mtk_uart_apdma_match),
> > > +       },
> > > +};
> > > +
> > > +module_platform_driver(mtk_uart_apdma_driver);
> > > +
> > > +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
> > > +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
> > > +MODULE_LICENSE("GPL v2");
> > > +
> > > diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
> > > index 27bac0b..1a523c87 100644
> > > --- a/drivers/dma/mediatek/Kconfig
> > > +++ b/drivers/dma/mediatek/Kconfig
> > > @@ -1,4 +1,15 @@
> > >
> > > +config DMA_MTK_UART
> > > +       tristate "MediaTek SoCs APDMA support for UART"
> > > +       depends on OF && SERIAL_8250_MT6577
> > > +       select DMA_ENGINE
> > > +       select DMA_VIRTUAL_CHANNELS
> > > +       help
> > > +         Support for the UART DMA engine found on MediaTek MTK SoCs.
> > > +         when SERIAL_8250_MT6577 is enabled, and if you want to use DMA,
> > > +         you can enable the config. the DMA engine can only be used
> > > +         with MediaTek SoCs.
> > > +
> > >  config MTK_HSDMA
> > >         tristate "MediaTek High-Speed DMA controller support"
> > >         depends on ARCH_MEDIATEK || COMPILE_TEST
> > > diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
> > > index 6e778f8..2f2efd9 100644
> > > --- a/drivers/dma/mediatek/Makefile
> > > +++ b/drivers/dma/mediatek/Makefile
> > > @@ -1 +1,2 @@
> > > +obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
> > >  obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
> > > --
> > > 1.7.9.5
> > >
>
>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-26  0:05       ` Nicolas Boichat
@ 2018-12-26  6:35         ` Long Cheng
  2018-12-26  9:28           ` Nicolas Boichat
  0 siblings, 1 reply; 9+ messages in thread
From: Long Cheng @ 2018-12-26  6:35 UTC (permalink / raw)
  To: Nicolas Boichat
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen, Zhenbao Liu, Long Cheng

On Wed, 2018-12-26 at 08:05 +0800, Nicolas Boichat wrote:

thanks. 

> On Tue, Dec 25, 2018 at 8:06 PM Long Cheng <long.cheng@mediatek.com> wrote:
> >
> > Thanks for your comments.
> >
> > On Tue, 2018-12-25 at 15:16 +0800, Nicolas Boichat wrote:
> > > Not a full review, a few comments below.
> > >
> > > Thanks,
> > >
> >
> > would you like help to full review at this version patch?  and then i
> > can modify these at next version patch. thanks.
> 
> Added a few more comments ,-)
> 
> > > On Tue, Dec 25, 2018 at 9:27 AM Long Cheng <long.cheng@mediatek.com> wrote:
> > > >
> > > > In DMA engine framework, add 8250 uart dma to support MediaTek uart.
> > > > If MediaTek uart enabled(SERIAL_8250_MT6577), and want to improve
> > > > the performance, can enable the function.
> > > >
> > > > Signed-off-by: Long Cheng <long.cheng@mediatek.com>
> > > > ---
> > > >  drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
> > > >  drivers/dma/mediatek/Kconfig        |   11 +
> > > >  drivers/dma/mediatek/Makefile       |    1 +
> > > >  3 files changed, 706 insertions(+)
> > > >  create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c
> > > >
> > > > diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
> > > > new file mode 100644
> > > > index 0000000..c4090f2
> > > > --- /dev/null
> > > > +++ b/drivers/dma/mediatek/8250_mtk_dma.c
> > > > @@ -0,0 +1,694 @@
> > > > +// SPDX-License-Identifier: GPL-2.0
> > > > +/*
> > > > + * MediaTek 8250 DMA driver.
> > > > + *
> > > > + * Copyright (c) 2018 MediaTek Inc.
> > > > + * Author: Long Cheng <long.cheng@mediatek.com>
> > > > + */
> > > > +
> > > > +#include <linux/clk.h>
> > > > +#include <linux/dmaengine.h>
> > > > +#include <linux/dma-mapping.h>
> > > > +#include <linux/err.h>
> > > > +#include <linux/init.h>
> > > > +#include <linux/interrupt.h>
> > > > +#include <linux/list.h>
> > > > +#include <linux/kernel.h>
> > > > +#include <linux/module.h>
> > > > +#include <linux/of_dma.h>
> > > > +#include <linux/of_device.h>
> > > > +#include <linux/platform_device.h>
> > > > +#include <linux/slab.h>
> > > > +#include <linux/spinlock.h>
> > > > +#include <linux/pm_runtime.h>
> > > > +#include <linux/iopoll.h>
> > >
> > > Alphabetical order.
> >
> > ok, i will order.
> >
> > >
> > > > +
> > > > +#include "../virt-dma.h"
> > > > +
> > > > +#define MTK_UART_APDMA_CHANNELS                (CONFIG_SERIAL_8250_NR_UARTS * 2)
> > > > +
> > > > +#define VFF_EN_B               BIT(0)
> > > > +#define VFF_STOP_B             BIT(0)
> > > > +#define VFF_FLUSH_B            BIT(0)
> > > > +#define VFF_4G_SUPPORT_B       BIT(0)
> > > > +#define VFF_RX_INT_EN0_B       BIT(0)  /*rx valid size >=  vff thre*/
> > > > +#define VFF_RX_INT_EN1_B       BIT(1)
> > > > +#define VFF_TX_INT_EN_B                BIT(0)  /*tx left size >= vff thre*/
> > > > +#define VFF_WARM_RST_B         BIT(0)
> > > > +#define VFF_RX_INT_FLAG_CLR_B  (BIT(0) | BIT(1))
> > > > +#define VFF_TX_INT_FLAG_CLR_B  0
> > > > +#define VFF_STOP_CLR_B         0
> > > > +#define VFF_FLUSH_CLR_B                0
> > > > +#define VFF_INT_EN_CLR_B       0
> > > > +#define VFF_4G_SUPPORT_CLR_B   0
> > > > +
> > > > +/* interrupt trigger level for tx */
> > > > +#define VFF_TX_THRE(n)         ((n) * 7 / 8)
> > > > +/* interrupt trigger level for rx */
> > > > +#define VFF_RX_THRE(n)         ((n) * 3 / 4)
> > > > +
> > > > +#define MTK_UART_APDMA_RING_SIZE       0xffffU
> > > > +/* invert this bit when wrap ring head again*/
> > > > +#define MTK_UART_APDMA_RING_WRAP       0x10000U
> > > > +
> > > > +#define VFF_INT_FLAG           0x00
> > > > +#define VFF_INT_EN             0x04
> > > > +#define VFF_EN                 0x08
> > > > +#define VFF_RST                        0x0c
> > > > +#define VFF_STOP               0x10
> > > > +#define VFF_FLUSH              0x14
> > > > +#define VFF_ADDR               0x1c
> > > > +#define VFF_LEN                        0x24
> > > > +#define VFF_THRE               0x28
> > > > +#define VFF_WPT                        0x2c
> > > > +#define VFF_RPT                        0x30
> > > > +/*TX: the buffer size HW can read. RX: the buffer size SW can read.*/
> > > > +#define VFF_VALID_SIZE         0x3c
> > > > +/*TX: the buffer size SW can write. RX: the buffer size HW can write.*/
> > > > +#define VFF_LEFT_SIZE          0x40
> > > > +#define VFF_DEBUG_STATUS       0x50
> > > > +#define VFF_4G_SUPPORT         0x54
> > > > +
> > > > +struct mtk_uart_apdmadev {
> > > > +       struct dma_device ddev;
> > > > +       struct clk *clk;
> > > > +       bool support_33bits;
> > > > +       unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
> > > > +};
> > > > +
> > > > +struct mtk_uart_apdma_desc {
> > > > +       struct virt_dma_desc vd;
> > > > +
> > > > +       unsigned int avail_len;
> > > > +};
> > > > +
> > > > +struct mtk_chan {
> > > > +       struct virt_dma_chan vc;
> > > > +       struct dma_slave_config cfg;
> > > > +       void __iomem *base;
> > > > +       struct mtk_uart_apdma_desc *desc;
> > > > +
> > > > +       bool requested;
> > > > +
> > > > +       unsigned int rx_status;
> > > > +};
> > > > +
> > > > +static inline struct mtk_uart_apdmadev *
> > > > +to_mtk_uart_apdma_dev(struct dma_device *d)
> > > > +{
> > > > +       return container_of(d, struct mtk_uart_apdmadev, ddev);
> > > > +}
> > > > +
> > > > +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
> > > > +{
> > > > +       return container_of(c, struct mtk_chan, vc.chan);
> > > > +}
> > > > +
> > > > +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
> > > > +       (struct dma_async_tx_descriptor *t)
> > > > +{
> > > > +       return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
> > > > +                              unsigned int reg, unsigned int val)
> > > > +{
> > > > +       writel(val, c->base + reg);
> > > > +}
> > > > +
> > > > +static unsigned int
> > > > +mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
> > > > +{
> > > > +       return readl(c->base + reg);
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
> > > > +{
> > > > +       struct dma_chan *chan = vd->tx.chan;
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +
> > > > +       kfree(c->desc);
> > > > +       c->desc = NULL;
> > >
> > > Why? I'm afraid this may mask double-free.
> > >
> >
> > i will remove it.
> >
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
> > > > +{
> > > > +       unsigned int txcount = c->desc->avail_len;
> > >
> > > Variable name is confusing... I'd rather have a boolean `transmitted`
> > > (or something), set to 0 here, and set to 1 in the loop (but again,
> > > maybe this is not necessary at all, see below).
> >
> > ok, i modify the function and remove it.
> >
> > >
> > > > +       unsigned int len, send, left, wpt, wrap;
> > > > +
> > > > +       if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
> > > > +               mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
> > >
> > > I'd add a "return;" here and de-indent the rest of the function (that
> > > should help to make it more readable).
> >
> > add
> >
> > >
> > > > +       } else {
> > > > +               len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > >
> > > You make an assumption below that len is always a power of 2. I wonder
> > > if we want to validate that with some BUG/WARN call.
> > >
> > > Also, I assume VFF_LEN is constant? Maybe we can read it once only in
> > > the probe function?
> > >
> >
> > no, the value of VFF_LEN isn't constant.
> >
> > > > +
> > > > +               while (((left = mtk_uart_apdma_chan_read(c,
> > > > +                                       VFF_LEFT_SIZE)) > 0U)
> > > > +                               && (c->desc->avail_len != 0U)) {
> > >
> > > Why do we need a while loop here? LEFT_SIZE will always be smaller
> > > than LEN, right? Shouldn't we load as much as we can, then let an
> > > interrupt happen and reload the FIFO at that stage? Or does this help
> > > gain a bit of extra performance?
> >
> > i will remove while, use 'if' instead of.
> > >
> > > > +                       send = min_t(unsigned int, left, c->desc->avail_len);
> > > > +                       wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > > +                       wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> > >
> > > wrap = wrp ^ MTK_UART_APDMA_RING_WRAP ?
> > >
> >
> > wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> > i had test it.
> 
> Sorry, I got it wrong (and my question mark is confusing), what I mean is this:
> 
> wrap = (wrp ^ MTK_UART_APDMA_RING_WRAP) & MTK_UART_APDMA_RING_WRAP;
> 
> But then it's marginally shorter than your solution, so maybe keep
> yours, it's a little simpler to understand.
> 

I will rename 'MTK_UART_APDMA_RING_WRAP' to 'VFF_RING_WRAP' to make the
code shorter, and I will keep the existing logic.

> > > > +                                       0U : MTK_UART_APDMA_RING_WRAP;
> > > > +
> > > > +                       if ((wpt & (len - 1U)) + send < len)
> 
> Is it possible for wpt > len? If not, then wpt + send < len should be enough.
> 
> Also, wpt + send is used a few times now, so maybe have it a separate variable?
> 
> Something like this might be nicer:
> 
> next_wpt = wpt + send;
> if ((wpt & (len - 1U)) + send >= len) {
>    next_wpt = next_wpt & (len - 1U);
>    if (!(wpt & MTK_UART_APDMA_RING_WRAP))
>       next_wpt |= MTK_UART_APDMA_RING_WRAP;
> }
> mtk_uart_apdma_chan_write(c, VFF_WPT, next_wpt);
> 
> Or if the assumption than wpt < len holds:
> if (next_wpt >= len) {
>    next_wpt = next_wpt - len;
> ...
> 

I will modify it as you suggest:
next_wpt = wpt + send;
if ((wpt & (len - 1U)) + send >= len) {
   next_wpt = next_wpt & (len - 1U);
   if (!(wpt & MTK_UART_APDMA_RING_WRAP))
      next_wpt |= MTK_UART_APDMA_RING_WRAP;
}
I think that's better.
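
Something like this is what I have in mind for the TX path in the next
version (an untested sketch only, using the register names from this
patch; the actual v8 may differ):

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	unsigned int len, send, left, wpt, next_wpt;

	left = mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE);
	if (left == 0U) {
		/* ring is full: re-arm the TX interrupt and wait */
		mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	len = mtk_uart_apdma_chan_read(c, VFF_LEN);
	send = min_t(unsigned int, left, c->desc->avail_len);
	wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);

	/* advance the write pointer, toggling the wrap bit when wrapping */
	next_wpt = wpt + send;
	if ((wpt & (len - 1U)) + send >= len) {
		next_wpt &= (len - 1U);
		if (!(wpt & MTK_UART_APDMA_RING_WRAP))
			next_wpt |= MTK_UART_APDMA_RING_WRAP;
	}
	mtk_uart_apdma_chan_write(c, VFF_WPT, next_wpt);
	c->desc->avail_len -= send;

	/* data was queued: keep the TX interrupt on and kick a flush */
	mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (mtk_uart_apdma_chan_read(c, VFF_FLUSH) == 0U)
		mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_B);
}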

> > > > +                               mtk_uart_apdma_chan_write(c,
> > > > +                                               VFF_WPT, wpt + send);
> > > > +                       else
> > > > +                               mtk_uart_apdma_chan_write(c, VFF_WPT,
> > > > +                                                  ((wpt + send) & (len - 1U))
> > > > +                                                  | wrap);
> > > > +
> > > > +                       c->desc->avail_len -= send;
> > > > +               }
> > > > +
> > > > +               if (txcount != c->desc->avail_len) {
> > >
> > > We know mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) > 0 at least once,
> > > so this test should always be true.
> > >
> > > Unless we have c->desc->avail_len == 0, but then we won't even call
> > > this function... So this is always true.
> > >
> >
> > yes. you are right. i will remove it.
> >
> > > > +                       mtk_uart_apdma_chan_write(c,
> > > > +                                       VFF_INT_EN, VFF_TX_INT_EN_B);
> > > > +                       if (mtk_uart_apdma_chan_read(c,
> > > > +                                               VFF_FLUSH) == 0U)
> > > > +                               mtk_uart_apdma_chan_write(c,
> > > > +                                               VFF_FLUSH, VFF_FLUSH_B);
> > > > +               }
> > > > +       }
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
> > > > +{
> > > > +       struct mtk_uart_apdma_desc *d = c->desc;
> > > > +       unsigned int rx_len, wg, rg, count;
> > > > +
> > > > +       if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
> > > > +               return;
> > > > +
> > > > +       if (d && vchan_next_desc(&c->vc)) {
> 
> I'd negate the test to de-indent the rest of the code:
> if (!d || !vchan_next_desc(&c->vc))
>    return;
> 

OK. I will modify it like this:
if ((mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U) ||
    !d || !vchan_next_desc(&c->vc))
    return;
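
Together with the if/else for the count calculation, the whole RX path
would then look roughly like this (sketch only, untested):

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int rx_len, wg, rg, count;

	if ((mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U) ||
	    !d || !vchan_next_desc(&c->vc))
		return;

	rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
	rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
	wg = mtk_uart_apdma_chan_read(c, VFF_WPT);

	/* bytes between read and write pointer, taking ring wrap into account */
	if ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP)
		count = (wg & MTK_UART_APDMA_RING_SIZE) + rx_len -
			(rg & MTK_UART_APDMA_RING_SIZE);
	else
		count = (wg & MTK_UART_APDMA_RING_SIZE) -
			(rg & MTK_UART_APDMA_RING_SIZE);

	c->rx_status = count;
	mtk_uart_apdma_chan_write(c, VFF_RPT, wg);

	list_del(&d->vd.node);
	vchan_cookie_complete(&d->vd);
}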

> > > > +               rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > > > +               rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
> > > > +               wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > > +               count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
> > > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) +
> > > > +                               rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
> > > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) -
> > > > +                               (rg & MTK_UART_APDMA_RING_SIZE));
> > >
> > > This is hard to read, please use a if/else.
> > >
> >
> > ok.
> >
> > > > +
> > > > +               c->rx_status = count;
> > > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
> > > > +
> > > > +               list_del(&d->vd.node);
> > > > +               vchan_cookie_complete(&d->vd);
> > > > +       }
> > > > +}
> > > > +
> > > > +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
> > > > +{
> > > > +       struct dma_chan *chan = (struct dma_chan *)dev_id;
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       struct mtk_uart_apdma_desc *d;
> > > > +       unsigned long flags;
> > > > +
> > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > +       switch (c->cfg.direction) {
> > > > +       case DMA_DEV_TO_MEM:
> > >
> > > You use an if/else below in a similar case, please be consistent (IMHO
> > > if/else is a bit more compact, so I'd use that here).
> > >
> > sean wang had review it. the link
> > http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html
> >

OK, I will use if/else.
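
So the interrupt handler would become roughly this (untested sketch based
on the code in this patch):

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->cfg.direction == DMA_DEV_TO_MEM) {
		mtk_uart_apdma_chan_write(c, VFF_INT_FLAG,
					  VFF_RX_INT_FLAG_CLR_B);
		mtk_uart_apdma_start_rx(c);
	} else if (c->cfg.direction == DMA_MEM_TO_DEV) {
		d = c->desc;
		mtk_uart_apdma_chan_write(c, VFF_INT_FLAG,
					  VFF_TX_INT_FLAG_CLR_B);
		if (d->avail_len != 0U) {
			mtk_uart_apdma_start_tx(c);
		} else {
			/* whole descriptor sent: complete the cookie */
			list_del(&d->vd.node);
			vchan_cookie_complete(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}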

> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > +               mtk_uart_apdma_start_rx(c);
> > > > +               break;
> > > > +       case DMA_MEM_TO_DEV:
> > > > +               d = c->desc;
> > > > +
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > +
> > > > +               if (d->avail_len != 0U) {
> > > > +                       mtk_uart_apdma_start_tx(c);
> > > > +               } else {
> > > > +                       list_del(&d->vd.node);
> > > > +                       vchan_cookie_complete(&d->vd);
> > > > +               }
> > > > +               break;
> > > > +       default:
> > > > +               break;
> > > > +       }
> > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > +
> > > > +       return IRQ_HANDLED;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       u32 status;
> > > > +       int ret = -EBUSY;
> 
> No need to init ret.
> 

ok.

> > > > +
> > > > +       pm_runtime_get_sync(mtkd->ddev.dev);
> > > > +
> > > > +       mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
> > > > +       mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
> > > > +       mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
> > > > +       mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
> > > > +
> > > > +       ret = readx_poll_timeout(readl,
> > > > +                                c->base + VFF_EN,
> > > > +                                status, status == 0, 10, 100);
> > > > +       if (ret) {
> > > > +               dev_err(c->vc.chan.device->dev,
> > > > +                               "dma reset: fail, timeout\n");
> > > > +               goto exit;
> 
> Just return ret, since you don't do any cleanup in exit:.
> 

ok.

> > > > +       }
> > > > +
> > > > +       if (!c->requested) {
> > > > +               c->requested = true;
> > > > +               ret = request_irq(mtkd->dma_irq[chan->chan_id],
> > > > +                                 mtk_uart_apdma_irq_handler,
> > > > +                                 IRQF_TRIGGER_NONE,
> > > > +                                 KBUILD_MODNAME, chan);
> > > > +               if (ret < 0) {
> > > > +                       dev_err(chan->device->dev, "Can't request dma IRQ\n");
> > > > +                       return -EINVAL;
> > > > +               }
> > > > +       }
> > > > +
> > > > +       if (mtkd->support_33bits)
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
> > > > +
> > > > +exit:
> > > > +       return ret;
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +
> > > > +       if (c->requested) {
> > > > +               c->requested = false;
> > > > +               free_irq(mtkd->dma_irq[chan->chan_id], chan);
> > > > +       }
> > > > +
> > > > +       tasklet_kill(&c->vc.task);
> > > > +
> > > > +       vchan_free_chan_resources(&c->vc);
> > > > +
> > > > +       pm_runtime_put_sync(mtkd->ddev.dev);
> > > > +}
> > > > +
> > > > +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
> > > > +                                        dma_cookie_t cookie,
> > > > +                                        struct dma_tx_state *txstate)
> > > > +{
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       enum dma_status ret;
> > > > +       unsigned long flags;
> > > > +
> > > > +       if (!txstate)
> > > > +               return DMA_ERROR;
> > > > +
> > > > +       ret = dma_cookie_status(chan, cookie, txstate);
> > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > +       if (ret == DMA_IN_PROGRESS) {
> > > > +               c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
> > > > +                            & MTK_UART_APDMA_RING_SIZE;
> > > > +               dma_set_residue(txstate, c->rx_status);
> > > > +       } else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
> > > > +               dma_set_residue(txstate, c->rx_status);
> > > > +       } else {
> > > > +               dma_set_residue(txstate, 0);
> > > > +       }
> > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > +
> > > > +       return ret;
> > > > +}
> > > > +
> > > > +/*
> > > > + * dmaengine_prep_slave_single will call the function. and sglen is 1.
> > > > + * 8250 uart using one ring buffer, and deal with one sg.
> > > > + */
> > > > +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
> > > > +       (struct dma_chan *chan, struct scatterlist *sgl,
> > > > +       unsigned int sglen,     enum dma_transfer_direction dir,
> 
> Too many spaces (or a tab?) between `sglen,` and `enum`
> 

OK. I will modify it.

> > > > +       unsigned long tx_flags, void *context)
> > > > +{
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       struct mtk_uart_apdma_desc *d;
> > > > +
> > > > +       if ((dir != DMA_DEV_TO_MEM) &&
> > > > +               (dir != DMA_MEM_TO_DEV)) {
> > > > +               dev_err(chan->device->dev, "bad direction\n");
> > > > +               return NULL;
> > > > +       }
> > > > +
> > > > +       /* Now allocate and setup the descriptor */
> > > > +       d = kzalloc(sizeof(*d), GFP_ATOMIC);
> > > > +       if (!d)
> > > > +               return NULL;
> > > > +
> > > > +       /* sglen is 1 */
> > > > +       d->avail_len = sg_dma_len(sgl);
> > > > +
> > > > +       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
> > > > +}
> > > > +
> > > > +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
> > > > +{
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       struct virt_dma_desc *vd;
> > > > +       unsigned long flags;
> > > > +
> > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > +       if (c->cfg.direction == DMA_DEV_TO_MEM) {
> > > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > > +                       vd = vchan_next_desc(&c->vc);
> > > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > > +                       mtk_uart_apdma_start_rx(c);
> > > > +               }
> > > > +       } else if (c->cfg.direction == DMA_MEM_TO_DEV) {
> > > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > > +                       vd = vchan_next_desc(&c->vc);
> > > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > > +                       mtk_uart_apdma_start_tx(c);
> > > > +               }
> > > > +       }
> > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
> > > > +                               struct dma_slave_config *cfg)
> > > > +{
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       struct mtk_uart_apdmadev *mtkd =
> > > > +                               to_mtk_uart_apdma_dev(c->vc.chan.device);
> > > > +
> > > > +       c->cfg = *cfg;
> > > > +
> > > > +       if (cfg->direction == DMA_DEV_TO_MEM) {
> > > > +               unsigned int rx_len = cfg->src_addr_width * 1024;
> > > > +
> > > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_EN, VFF_RX_INT_EN0_B
> > > > +                               | VFF_RX_INT_EN1_B);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > > +       } else if (cfg->direction == DMA_MEM_TO_DEV)    {
> > > > +               unsigned int tx_len = cfg->dst_addr_width * 1024;
> > > > +
> > > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
> > > > +               mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > > +       }
> > > > +
> > > > +       if (mtkd->support_33bits)
> > > > +               mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
> > > > +
> > > > +       if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
> > > > +               dev_err(chan->device->dev,
> > > > +                       "config dma dir[%d] fail\n", cfg->direction);
> > > > +               return -EINVAL;
> > > > +       }
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
> > > > +{
> > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > +       unsigned long flags;
> > > > +       u32 status;
> > > > +       int ret;
> > > > +
> > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > +
> > > > +       mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
> > > > +       /* Wait for flush */
> > > > +       ret = readx_poll_timeout(readl,
> > > > +                                c->base + VFF_FLUSH,
> > > > +                                status,
> > > > +                                (status & VFF_FLUSH_B) != VFF_FLUSH_B,
> > > > +                                10, 100);
> > > > +       if (ret)
> > > > +               dev_err(c->vc.chan.device->dev,
> > > > +                       "dma stop: polling FLUSH fail, DEBUG=0x%x\n",
> > > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > > +
> > > > +       /*set stop as 1 -> wait until en is 0 -> set stop as 0*/
> > > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
> > > > +       ret = readx_poll_timeout(readl,
> > > > +                                c->base + VFF_EN,
> > > > +                                status, status == 0, 10, 100);
> > > > +       if (ret)
> > > > +               dev_err(c->vc.chan.device->dev,
> > > > +                       "dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
> > > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > > +
> > > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
> > > > +       mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
> > > > +
> > > > +       switch (c->cfg.direction) {
> > > > +       case DMA_DEV_TO_MEM:
> > >
> > > Ditto, if/else?
> > >
> > ditto.
> > http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html
> 
> Well, I disagree, especially in this case: We turn 4 lines of code into 10...
> 

I see, I will use if/else.

> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > +               break;
> > > > +       case DMA_MEM_TO_DEV:
> > > > +               mtk_uart_apdma_chan_write(c,
> > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > +               break;
> > > > +       default:
> > > > +               break;
> > > > +       }
> > > > +
> > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> > > > +{
> > > > +       /* just for check caps pass */
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> > > > +{
> > > > +       /* just for check caps pass */
> > > > +       return 0;
> > > > +}
> 
> Why do you need these? Seems wrong to advertise device_pause/resume
> (and the caps) if we don't actually support that?
> 

In the serial 8250_dma.c file, when requesting DMA, it calls
'dma_get_slave_caps' to get the caps and then checks caps.cmd_pause; that
pause flag comes from device_pause. Our device can't support these
operations, but to pass the check we need to add the stub functions. Or
please suggest a better approach. Thanks.
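
If I recall correctly, the check is in serial8250_request_dma() in
drivers/tty/serial/8250/8250_dma.c; roughly the following (quoting from
memory, the exact code may differ by kernel version), which is why this
driver has to advertise pause/terminate even though pause is a no-op here:

	struct dma_slave_caps caps;

	/* 8250 RX DMA wants pause/terminate and better-than-descriptor residue */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}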

> > > > +
> > > > +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> > > > +{
> > > > +       while (list_empty(&mtkd->ddev.channels) == 0) {
> > > > +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> > > > +                       struct mtk_chan, vc.chan.device_node);
> > > > +
> > > > +               list_del(&c->vc.chan.device_node);
> > > > +               tasklet_kill(&c->vc.task);
> > > > +       }
> > > > +}
> > > > +
> > > > +static const struct of_device_id mtk_uart_apdma_match[] = {
> > > > +       { .compatible = "mediatek,mt6577-uart-dma", },
> > > > +       { /* sentinel */ },
> > > > +};
> > > > +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> > > > +
> > > > +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd;
> > > > +       struct resource *res;
> > > > +       struct mtk_chan *c;
> > > > +       unsigned int i;
> > > > +       int rc;
> > > > +
> > > > +       mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
> > > > +       if (!mtkd)
> > > > +               return -ENOMEM;
> > > > +
> > > > +       mtkd->clk = devm_clk_get(&pdev->dev, NULL);
> > > > +       if (IS_ERR(mtkd->clk)) {
> > > > +               dev_err(&pdev->dev, "No clock specified\n");
> > > > +               rc = PTR_ERR(mtkd->clk);
> > > > +               goto err_no_dma;
> > > > +       }
> > > > +
> > > > +       if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
> > > > +               dev_info(&pdev->dev, "Support dma 33bits\n");
> > >
> > > Drop this, a bit verbose.
> > >
> > > > +               mtkd->support_33bits = true;
> > > > +       }
> > > > +
> > > > +       rc = dma_set_mask_and_coherent(&pdev->dev,
> > > > +                               DMA_BIT_MASK(32 | mtkd->support_33bits));
> > > > +       if (rc)
> > > > +               goto err_no_dma;
> > > > +
> > > > +       dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
> > > > +       mtkd->ddev.device_alloc_chan_resources =
> > > > +                               mtk_uart_apdma_alloc_chan_resources;
> > > > +       mtkd->ddev.device_free_chan_resources =
> > > > +                               mtk_uart_apdma_free_chan_resources;
> > > > +       mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
> > > > +       mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
> > > > +       mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
> > > > +       mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
> > > > +       mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
> > > > +       mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
> > > > +       mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
> > > > +       mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > > +       mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > > +       mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> > > > +       mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> > > > +       mtkd->ddev.dev = &pdev->dev;
> > > > +       INIT_LIST_HEAD(&mtkd->ddev.channels);
> > > > +
> > > > +       for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
> > > > +               c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
> > > > +               if (!c) {
> > > > +                       rc = -ENODEV;
> > > > +                       goto err_no_dma;
> > > > +               }
> > > > +
> > > > +               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
> > > > +               if (!res) {
> > > > +                       rc = -ENODEV;
> > > > +                       goto err_no_dma;
> > > > +               }
> > > > +
> > > > +               c->base = devm_ioremap_resource(&pdev->dev, res);
> > > > +               if (IS_ERR(c->base)) {
> > > > +                       rc = PTR_ERR(c->base);
> > > > +                       goto err_no_dma;
> > > > +               }
> > > > +               c->requested = false;
> > > > +               c->vc.desc_free = mtk_uart_apdma_desc_free;
> > > > +               vchan_init(&c->vc, &mtkd->ddev);
> > > > +
> > > > +               mtkd->dma_irq[i] = platform_get_irq(pdev, i);
> > > > +               if ((int)mtkd->dma_irq[i] < 0) {
> > > > +                       dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
> > > > +                       rc = -EINVAL;
> > > > +                       goto err_no_dma;
> > > > +               }
> > > > +       }
> > > > +
> > > > +       pm_runtime_enable(&pdev->dev);
> > > > +       pm_runtime_set_active(&pdev->dev);
> > > > +
> > > > +       rc = dma_async_device_register(&mtkd->ddev);
> > > > +       if (rc)
> > > > +               goto rpm_disable;
> > > > +
> > > > +       platform_set_drvdata(pdev, mtkd);
> > > > +
> > > > +       if (pdev->dev.of_node) {
> > > > +               /* Device-tree DMA controller registration */
> > > > +               rc = of_dma_controller_register(pdev->dev.of_node,
> > > > +                                               of_dma_xlate_by_chan_id,
> > > > +                                               mtkd);
> > > > +               if (rc)
> > > > +                       goto dma_remove;
> > > > +       }
> > > > +
> > > > +       return rc;
> > > > +
> > > > +dma_remove:
> > > > +       dma_async_device_unregister(&mtkd->ddev);
> > > > +rpm_disable:
> > > > +       pm_runtime_disable(&pdev->dev);
> > > > +err_no_dma:
> > > > +       mtk_uart_apdma_free(mtkd);
> 
> It's not very correct to call this before
> INIT_LIST_HEAD(&mtkd->ddev.channels);
> 
> I'd just call `return rc;` directly for all instances before that line.
> 

OK, I will modify it.
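
Concretely, the failures before INIT_LIST_HEAD() would just return
directly, and the goto labels would stay only for failures after the
channel list exists. Abbreviated sketch of the probe flow (not the final
patch, elided parts marked with comments):

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(mtkd->clk);
	}

	rc = dma_set_mask_and_coherent(&pdev->dev,
				       DMA_BIT_MASK(32 | mtkd->support_33bits));
	if (rc)
		return rc;

	/* ... set up mtkd->ddev callbacks ... */
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
		/* ... alloc/ioremap/irq failures may now goto err_no_dma ... */
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	/* ... of_dma_controller_register(), goto dma_remove on failure ... */

	return 0;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;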

> > > > +       return rc;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_remove(struct platform_device *pdev)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
> > > > +
> > > > +       if (pdev->dev.of_node)
> > > > +               of_dma_controller_free(pdev->dev.of_node);
> > > > +
> > > > +       pm_runtime_disable(&pdev->dev);
> > > > +       pm_runtime_put_noidle(&pdev->dev);
> > > > +
> > > > +       dma_async_device_unregister(&mtkd->ddev);
> > > > +       mtk_uart_apdma_free(mtkd);
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +#ifdef CONFIG_PM_SLEEP
> > > > +static int mtk_uart_apdma_suspend(struct device *dev)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > +
> > > > +       if (!pm_runtime_suspended(dev))
> > > > +               clk_disable_unprepare(mtkd->clk);
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_resume(struct device *dev)
> > > > +{
> > > > +       int ret;
> > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > +
> > > > +       if (!pm_runtime_suspended(dev)) {
> > > > +               ret = clk_prepare_enable(mtkd->clk);
> > > > +               if (ret)
> > > > +                       return ret;
> > > > +       }
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +#endif /* CONFIG_PM_SLEEP */
> > > > +
> > > > +#ifdef CONFIG_PM
> > > > +static int mtk_uart_apdma_runtime_suspend(struct device *dev)
> > > > +{
> > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > +
> > > > +       clk_disable_unprepare(mtkd->clk);
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static int mtk_uart_apdma_runtime_resume(struct device *dev)
> > > > +{
> > > > +       int ret;
> > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > +
> > > > +       ret = clk_prepare_enable(mtkd->clk);
> > > > +       if (ret)
> > > > +               return ret;
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +#endif /* CONFIG_PM */
> > > > +
> > > > +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
> > > > +       SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
> > > > +       SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
> > > > +                          mtk_uart_apdma_runtime_resume, NULL)
> > > > +};
> > > > +
> > > > +static struct platform_driver mtk_uart_apdma_driver = {
> > > > +       .probe  = mtk_uart_apdma_probe,
> > > > +       .remove = mtk_uart_apdma_remove,
> > > > +       .driver = {
> > > > +               .name           = KBUILD_MODNAME,
> > > > +               .pm             = &mtk_uart_apdma_pm_ops,
> > > > +               .of_match_table = of_match_ptr(mtk_uart_apdma_match),
> > > > +       },
> > > > +};
> > > > +
> > > > +module_platform_driver(mtk_uart_apdma_driver);
> > > > +
> > > > +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
> > > > +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
> > > > +MODULE_LICENSE("GPL v2");
> > > > +
> > > > diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
> > > > index 27bac0b..1a523c87 100644
> > > > --- a/drivers/dma/mediatek/Kconfig
> > > > +++ b/drivers/dma/mediatek/Kconfig
> > > > @@ -1,4 +1,15 @@
> > > >
> > > > +config DMA_MTK_UART
> > > > +       tristate "MediaTek SoCs APDMA support for UART"
> > > > +       depends on OF && SERIAL_8250_MT6577
> > > > +       select DMA_ENGINE
> > > > +       select DMA_VIRTUAL_CHANNELS
> > > > +       help
> > > > +         Support for the UART DMA engine found on MediaTek MTK SoCs.
> > > > +         when SERIAL_8250_MT6577 is enabled, and if you want to use DMA,
> > > > +         you can enable the config. the DMA engine can only be used
> > > > +         with MediaTek SoCs.
> > > > +
> > > >  config MTK_HSDMA
> > > >         tristate "MediaTek High-Speed DMA controller support"
> > > >         depends on ARCH_MEDIATEK || COMPILE_TEST
> > > > diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
> > > > index 6e778f8..2f2efd9 100644
> > > > --- a/drivers/dma/mediatek/Makefile
> > > > +++ b/drivers/dma/mediatek/Makefile
> > > > @@ -1 +1,2 @@
> > > > +obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
> > > >  obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
> > > > --
> > > > 1.7.9.5
> > > >
> >
> >



^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-26  6:35         ` Long Cheng
@ 2018-12-26  9:28           ` Nicolas Boichat
  2018-12-26 10:18             ` Long Cheng
  0 siblings, 1 reply; 9+ messages in thread
From: Nicolas Boichat @ 2018-12-26  9:28 UTC (permalink / raw)
  To: Long Cheng
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen, Zhenbao Liu

On Wed, Dec 26, 2018 at 2:35 PM Long Cheng <long.cheng@mediatek.com> wrote:
>
> On Wed, 2018-12-26 at 08:05 +0800, Nicolas Boichat wrote:
>
> thanks.
>
> > On Tue, Dec 25, 2018 at 8:06 PM Long Cheng <long.cheng@mediatek.com> wrote:
> > >
> > > Thanks for your comments.
> > >
> > > On Tue, 2018-12-25 at 15:16 +0800, Nicolas Boichat wrote:
> > > > Not a full review, a few comments below.
> > > >
> > > > Thanks,
> > > >
> > >
> > > would you like help to full review at this version patch?  and then i
> > > can modify these at next version patch. thanks.
> >
> > Added a few more comments ,-)
> >
> > > > On Tue, Dec 25, 2018 at 9:27 AM Long Cheng <long.cheng@mediatek.com> wrote:
> > > > >
> > > > > In DMA engine framework, add 8250 uart dma to support MediaTek uart.
> > > > > If MediaTek uart enabled(SERIAL_8250_MT6577), and want to improve
> > > > > the performance, can enable the function.
> > > > >
> > > > > Signed-off-by: Long Cheng <long.cheng@mediatek.com>
> > > > > ---
> > > > >  drivers/dma/mediatek/8250_mtk_dma.c |  694 +++++++++++++++++++++++++++++++++++
> > > > >  drivers/dma/mediatek/Kconfig        |   11 +
> > > > >  drivers/dma/mediatek/Makefile       |    1 +
> > > > >  3 files changed, 706 insertions(+)
> > > > >  create mode 100644 drivers/dma/mediatek/8250_mtk_dma.c
> > > > >
> > > > > diff --git a/drivers/dma/mediatek/8250_mtk_dma.c b/drivers/dma/mediatek/8250_mtk_dma.c
> > > > > new file mode 100644
> > > > > index 0000000..c4090f2
> > > > > --- /dev/null
> > > > > +++ b/drivers/dma/mediatek/8250_mtk_dma.c
> > > > > @@ -0,0 +1,694 @@
> > > > > +// SPDX-License-Identifier: GPL-2.0
> > > > > +/*
> > > > > + * MediaTek 8250 DMA driver.
> > > > > + *
> > > > > + * Copyright (c) 2018 MediaTek Inc.
> > > > > + * Author: Long Cheng <long.cheng@mediatek.com>
> > > > > + */
> > > > > +
> > > > > +#include <linux/clk.h>
> > > > > +#include <linux/dmaengine.h>
> > > > > +#include <linux/dma-mapping.h>
> > > > > +#include <linux/err.h>
> > > > > +#include <linux/init.h>
> > > > > +#include <linux/interrupt.h>
> > > > > +#include <linux/list.h>
> > > > > +#include <linux/kernel.h>
> > > > > +#include <linux/module.h>
> > > > > +#include <linux/of_dma.h>
> > > > > +#include <linux/of_device.h>
> > > > > +#include <linux/platform_device.h>
> > > > > +#include <linux/slab.h>
> > > > > +#include <linux/spinlock.h>
> > > > > +#include <linux/pm_runtime.h>
> > > > > +#include <linux/iopoll.h>
> > > >
> > > > Alphabetical order.
> > >
> > > ok, i will order.
> > >
> > > >
> > > > > +
> > > > > +#include "../virt-dma.h"
> > > > > +
> > > > > +#define MTK_UART_APDMA_CHANNELS                (CONFIG_SERIAL_8250_NR_UARTS * 2)
> > > > > +
> > > > > +#define VFF_EN_B               BIT(0)
> > > > > +#define VFF_STOP_B             BIT(0)
> > > > > +#define VFF_FLUSH_B            BIT(0)
> > > > > +#define VFF_4G_SUPPORT_B       BIT(0)
> > > > > +#define VFF_RX_INT_EN0_B       BIT(0)  /*rx valid size >=  vff thre*/
> > > > > +#define VFF_RX_INT_EN1_B       BIT(1)
> > > > > +#define VFF_TX_INT_EN_B                BIT(0)  /*tx left size >= vff thre*/
> > > > > +#define VFF_WARM_RST_B         BIT(0)
> > > > > +#define VFF_RX_INT_FLAG_CLR_B  (BIT(0) | BIT(1))
> > > > > +#define VFF_TX_INT_FLAG_CLR_B  0
> > > > > +#define VFF_STOP_CLR_B         0
> > > > > +#define VFF_FLUSH_CLR_B                0
> > > > > +#define VFF_INT_EN_CLR_B       0
> > > > > +#define VFF_4G_SUPPORT_CLR_B   0
> > > > > +
> > > > > +/* interrupt trigger level for tx */
> > > > > +#define VFF_TX_THRE(n)         ((n) * 7 / 8)
> > > > > +/* interrupt trigger level for rx */
> > > > > +#define VFF_RX_THRE(n)         ((n) * 3 / 4)
> > > > > +
> > > > > +#define MTK_UART_APDMA_RING_SIZE       0xffffU
> > > > > +/* invert this bit when wrap ring head again*/
> > > > > +#define MTK_UART_APDMA_RING_WRAP       0x10000U
> > > > > +
> > > > > +#define VFF_INT_FLAG           0x00
> > > > > +#define VFF_INT_EN             0x04
> > > > > +#define VFF_EN                 0x08
> > > > > +#define VFF_RST                        0x0c
> > > > > +#define VFF_STOP               0x10
> > > > > +#define VFF_FLUSH              0x14
> > > > > +#define VFF_ADDR               0x1c
> > > > > +#define VFF_LEN                        0x24
> > > > > +#define VFF_THRE               0x28
> > > > > +#define VFF_WPT                        0x2c
> > > > > +#define VFF_RPT                        0x30
> > > > > +/*TX: the buffer size HW can read. RX: the buffer size SW can read.*/
> > > > > +#define VFF_VALID_SIZE         0x3c
> > > > > +/*TX: the buffer size SW can write. RX: the buffer size HW can write.*/
> > > > > +#define VFF_LEFT_SIZE          0x40
> > > > > +#define VFF_DEBUG_STATUS       0x50
> > > > > +#define VFF_4G_SUPPORT         0x54
> > > > > +
> > > > > +struct mtk_uart_apdmadev {
> > > > > +       struct dma_device ddev;
> > > > > +       struct clk *clk;
> > > > > +       bool support_33bits;
> > > > > +       unsigned int dma_irq[MTK_UART_APDMA_CHANNELS];
> > > > > +};
> > > > > +
> > > > > +struct mtk_uart_apdma_desc {
> > > > > +       struct virt_dma_desc vd;
> > > > > +
> > > > > +       unsigned int avail_len;
> > > > > +};
> > > > > +
> > > > > +struct mtk_chan {
> > > > > +       struct virt_dma_chan vc;
> > > > > +       struct dma_slave_config cfg;
> > > > > +       void __iomem *base;
> > > > > +       struct mtk_uart_apdma_desc *desc;
> > > > > +
> > > > > +       bool requested;
> > > > > +
> > > > > +       unsigned int rx_status;
> > > > > +};
> > > > > +
> > > > > +static inline struct mtk_uart_apdmadev *
> > > > > +to_mtk_uart_apdma_dev(struct dma_device *d)
> > > > > +{
> > > > > +       return container_of(d, struct mtk_uart_apdmadev, ddev);
> > > > > +}
> > > > > +
> > > > > +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
> > > > > +{
> > > > > +       return container_of(c, struct mtk_chan, vc.chan);
> > > > > +}
> > > > > +
> > > > > +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
> > > > > +       (struct dma_async_tx_descriptor *t)
> > > > > +{
> > > > > +       return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_chan_write(struct mtk_chan *c,
> > > > > +                              unsigned int reg, unsigned int val)
> > > > > +{
> > > > > +       writel(val, c->base + reg);
> > > > > +}
> > > > > +
> > > > > +static unsigned int
> > > > > +mtk_uart_apdma_chan_read(struct mtk_chan *c, unsigned int reg)
> > > > > +{
> > > > > +       return readl(c->base + reg);
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
> > > > > +{
> > > > > +       struct dma_chan *chan = vd->tx.chan;
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +
> > > > > +       kfree(c->desc);
> > > > > +       c->desc = NULL;
> > > >
> > > > Why? I'm afraid this may mask double-free.
> > > >
> > >
> > > i will remove it.
> > >
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
> > > > > +{
> > > > > +       unsigned int txcount = c->desc->avail_len;
> > > >
> > > > Variable name is confusing... I'd rather have a boolean `transmitted`
> > > > (or something), set to 0 here, and set to 1 in the loop (but again,
> > > > maybe this is not necessary at all, see below).
> > >
> > > ok, i modify the function and remove it.
> > >
> > > >
> > > > > +       unsigned int len, send, left, wpt, wrap;
> > > > > +
> > > > > +       if (mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) == 0U) {
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
> > > >
> > > > I'd add a "return;" here and de-indent the rest of the function (that
> > > > should help to make it more readable).
> > >
> > > add
> > >
> > > >
> > > > > +       } else {
> > > > > +               len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > > >
> > > > You make an assumption below that len is always a power of 2. I wonder
> > > > if we want to validate that with some BUG/WARN call.
> > > >
> > > > Also, I assume VFF_LEN is constant? Maybe we can read it once only in
> > > > the probe function?
> > > >
> > >
> > > no, the value of VFF_LEN isn't constant.
> > >
> > > > > +
> > > > > +               while (((left = mtk_uart_apdma_chan_read(c,
> > > > > +                                       VFF_LEFT_SIZE)) > 0U)
> > > > > +                               && (c->desc->avail_len != 0U)) {
> > > >
> > > > Why do we need a while loop here? LEFT_SIZE will always be smaller
> > > > than LEN, right? Shouldn't we load as much as we can, then let an
> > > > interrupt happen and reload the FIFO at that stage? Or does this help
> > > > gain a bit of extra performance?
> > >
> > > i will remove while, use 'if' instead of.
> > > >
> > > > > +                       send = min_t(unsigned int, left, c->desc->avail_len);
> > > > > +                       wpt = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > > > +                       wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> > > >
> > > > wrap = wrp ^ MTK_UART_APDMA_RING_WRAP ?
> > > >
> > >
> > > wrap = wpt & MTK_UART_APDMA_RING_WRAP ?
> > > i had test it.
> >
> > Sorry, I got it wrong (and my question mark is confusing), what I mean is this:
> >
> > wrap = (wrp ^ MTK_UART_APDMA_RING_WRAP) & MTK_UART_APDMA_RING_WRAP;
> >
> > But then it's marginally shorter than your solution, so maybe keep
> > yours, it's a little simpler to understand.
> >
>
> I will rename 'MTK_UART_APDMA_RING_WRAP' to 'VFF_RING_WRAP' to make the
> code shorter, and I will keep the existing logic.
>
> > > > > +                                       0U : MTK_UART_APDMA_RING_WRAP;
> > > > > +
> > > > > +                       if ((wpt & (len - 1U)) + send < len)
> >
> > Is it possible for wpt > len? If not, then wpt + send < len should be enough.
> >
> > Also, wpt + send is used a few times now, so maybe have it a separate variable?
> >
> > Something like this might be nicer:
> >
> > next_wpt = wpt + send;
> > if ((wpt & (len - 1U)) + send >= len) {
> >    next_wpt = next_wpt & (len - 1U);
> >    if (!(wpt & MTK_UART_APDMA_RING_WRAP))
> >       next_wpt |= MTK_UART_APDMA_RING_WRAP;
> > }
> > mtk_uart_apdma_chan_write(c, VFF_WPT, next_wpt);
> >
> > Or if the assumption than wpt < len holds:
> > if (next_wpt >= len) {
> >    next_wpt = next_wpt - len;
> > ...
> >
>
> i will modify it like your comments,
> next_wpt = wpt + send;
> > if ((wpt & (len - 1U)) + send >= len) {
> >    next_wpt = next_wpt & (len - 1U);
> >    if (!(wpt & MTK_UART_APDMA_RING_WRAP))
> >       next_wpt |= MTK_UART_APDMA_RING_WRAP;
> >
> I think that is better.
>
> > > > > +                               mtk_uart_apdma_chan_write(c,
> > > > > +                                               VFF_WPT, wpt + send);
> > > > > +                       else
> > > > > +                               mtk_uart_apdma_chan_write(c, VFF_WPT,
> > > > > +                                                  ((wpt + send) & (len - 1U))
> > > > > +                                                  | wrap);
> > > > > +
> > > > > +                       c->desc->avail_len -= send;
> > > > > +               }
> > > > > +
> > > > > +               if (txcount != c->desc->avail_len) {
> > > >
> > > > We know mtk_uart_apdma_chan_read(c, VFF_LEFT_SIZE) > 0 at least once,
> > > > so this test should always be true.
> > > >
> > > > Unless we have c->desc->avail_len == 0, but then we won't even call
> > > > this function... So this is always true.
> > > >
> > >
> > > Yes, you are right. I will remove it.
> > >
> > > > > +                       mtk_uart_apdma_chan_write(c,
> > > > > +                                       VFF_INT_EN, VFF_TX_INT_EN_B);
> > > > > +                       if (mtk_uart_apdma_chan_read(c,
> > > > > +                                               VFF_FLUSH) == 0U)
> > > > > +                               mtk_uart_apdma_chan_write(c,
> > > > > +                                               VFF_FLUSH, VFF_FLUSH_B);
> > > > > +               }
> > > > > +       }
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
> > > > > +{
> > > > > +       struct mtk_uart_apdma_desc *d = c->desc;
> > > > > +       unsigned int rx_len, wg, rg, count;
> > > > > +
> > > > > +       if (mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U)
> > > > > +               return;
> > > > > +
> > > > > +       if (d && vchan_next_desc(&c->vc)) {
> >
> > I'd negate the test to de-indent the rest of the code:
> > if (!d || !vchan_next_desc(&c->vc))
> >    return;
> >
>
> OK. I will modify it like this:
> if ((mtk_uart_apdma_chan_read(c, VFF_VALID_SIZE) == 0U) ||
>     !d || !vchan_next_desc(&c->vc))
>     return;
>
> > > > > +               rx_len = mtk_uart_apdma_chan_read(c, VFF_LEN);
> > > > > +               rg = mtk_uart_apdma_chan_read(c, VFF_RPT);
> > > > > +               wg = mtk_uart_apdma_chan_read(c, VFF_WPT);
> > > > > +               count = ((rg ^ wg) & MTK_UART_APDMA_RING_WRAP) ?
> > > > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) +
> > > > > +                               rx_len - (rg & MTK_UART_APDMA_RING_SIZE)) :
> > > > > +                               ((wg & MTK_UART_APDMA_RING_SIZE) -
> > > > > +                               (rg & MTK_UART_APDMA_RING_SIZE));
> > > >
> > > > This is hard to read, please use an if/else.
> > > >
> > >
> > > ok.
> > >
> > > > > +
> > > > > +               c->rx_status = count;
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, wg);
> > > > > +
> > > > > +               list_del(&d->vd.node);
> > > > > +               vchan_cookie_complete(&d->vd);
> > > > > +       }
> > > > > +}
> > > > > +
> > > > > +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
> > > > > +{
> > > > > +       struct dma_chan *chan = (struct dma_chan *)dev_id;
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       struct mtk_uart_apdma_desc *d;
> > > > > +       unsigned long flags;
> > > > > +
> > > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > > +       switch (c->cfg.direction) {
> > > > > +       case DMA_DEV_TO_MEM:
> > > >
> > > > You use an if/else below in a similar case, please be consistent (IMHO
> > > > if/else is a bit more compact, so I'd use that here).
> > > >
> > > Sean Wang had already reviewed it; here is the link:
> > > http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html
> > >
>
> OK, I will use if/else.
>
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > > +               mtk_uart_apdma_start_rx(c);
> > > > > +               break;
> > > > > +       case DMA_MEM_TO_DEV:
> > > > > +               d = c->desc;
> > > > > +
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > > +
> > > > > +               if (d->avail_len != 0U) {
> > > > > +                       mtk_uart_apdma_start_tx(c);
> > > > > +               } else {
> > > > > +                       list_del(&d->vd.node);
> > > > > +                       vchan_cookie_complete(&d->vd);
> > > > > +               }
> > > > > +               break;
> > > > > +       default:
> > > > > +               break;
> > > > > +       }
> > > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > > +
> > > > > +       return IRQ_HANDLED;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       u32 status;
> > > > > +       int ret = -EBUSY;
> >
> > No need to init ret.
> >
>
> ok.
>
> > > > > +
> > > > > +       pm_runtime_get_sync(mtkd->ddev.dev);
> > > > > +
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_ADDR, 0);
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_THRE, 0);
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_LEN, 0);
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_RST, VFF_WARM_RST_B);
> > > > > +
> > > > > +       ret = readx_poll_timeout(readl,
> > > > > +                                c->base + VFF_EN,
> > > > > +                                status, status == 0, 10, 100);
> > > > > +       if (ret) {
> > > > > +               dev_err(c->vc.chan.device->dev,
> > > > > +                               "dma reset: fail, timeout\n");
> > > > > +               goto exit;
> >
> > Just return ret, since you don't do any cleanup in exit:.
> >
>
> ok.
>
> > > > > +       }
> > > > > +
> > > > > +       if (!c->requested) {
> > > > > +               c->requested = true;
> > > > > +               ret = request_irq(mtkd->dma_irq[chan->chan_id],
> > > > > +                                 mtk_uart_apdma_irq_handler,
> > > > > +                                 IRQF_TRIGGER_NONE,
> > > > > +                                 KBUILD_MODNAME, chan);
> > > > > +               if (ret < 0) {
> > > > > +                       dev_err(chan->device->dev, "Can't request dma IRQ\n");
> > > > > +                       return -EINVAL;
> > > > > +               }
> > > > > +       }
> > > > > +
> > > > > +       if (mtkd->support_33bits)
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
> > > > > +
> > > > > +exit:
> > > > > +       return ret;
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +
> > > > > +       if (c->requested) {
> > > > > +               c->requested = false;
> > > > > +               free_irq(mtkd->dma_irq[chan->chan_id], chan);
> > > > > +       }
> > > > > +
> > > > > +       tasklet_kill(&c->vc.task);
> > > > > +
> > > > > +       vchan_free_chan_resources(&c->vc);
> > > > > +
> > > > > +       pm_runtime_put_sync(mtkd->ddev.dev);
> > > > > +}
> > > > > +
> > > > > +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
> > > > > +                                        dma_cookie_t cookie,
> > > > > +                                        struct dma_tx_state *txstate)
> > > > > +{
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       enum dma_status ret;
> > > > > +       unsigned long flags;
> > > > > +
> > > > > +       if (!txstate)
> > > > > +               return DMA_ERROR;
> > > > > +
> > > > > +       ret = dma_cookie_status(chan, cookie, txstate);
> > > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > > +       if (ret == DMA_IN_PROGRESS) {
> > > > > +               c->rx_status = mtk_uart_apdma_chan_read(c, VFF_RPT)
> > > > > +                            & MTK_UART_APDMA_RING_SIZE;
> > > > > +               dma_set_residue(txstate, c->rx_status);
> > > > > +       } else if (ret == DMA_COMPLETE && c->cfg.direction == DMA_DEV_TO_MEM) {
> > > > > +               dma_set_residue(txstate, c->rx_status);
> > > > > +       } else {
> > > > > +               dma_set_residue(txstate, 0);
> > > > > +       }
> > > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > > +
> > > > > +       return ret;
> > > > > +}
> > > > > +
> > > > > +/*
> > > > > + * dmaengine_prep_slave_single will call the function. and sglen is 1.
> > > > > + * 8250 uart using one ring buffer, and deal with one sg.
> > > > > + */
> > > > > +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
> > > > > +       (struct dma_chan *chan, struct scatterlist *sgl,
> > > > > +       unsigned int sglen,     enum dma_transfer_direction dir,
> >
> > Too many spaces (or a tab?) between `sglen,` and `enum`
> >
>
> OK, I will modify it.
>
> > > > > +       unsigned long tx_flags, void *context)
> > > > > +{
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       struct mtk_uart_apdma_desc *d;
> > > > > +
> > > > > +       if ((dir != DMA_DEV_TO_MEM) &&
> > > > > +               (dir != DMA_MEM_TO_DEV)) {
> > > > > +               dev_err(chan->device->dev, "bad direction\n");
> > > > > +               return NULL;
> > > > > +       }
> > > > > +
> > > > > +       /* Now allocate and setup the descriptor */
> > > > > +       d = kzalloc(sizeof(*d), GFP_ATOMIC);
> > > > > +       if (!d)
> > > > > +               return NULL;
> > > > > +
> > > > > +       /* sglen is 1 */
> > > > > +       d->avail_len = sg_dma_len(sgl);
> > > > > +
> > > > > +       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
> > > > > +}
> > > > > +
> > > > > +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
> > > > > +{
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       struct virt_dma_desc *vd;
> > > > > +       unsigned long flags;
> > > > > +
> > > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > > +       if (c->cfg.direction == DMA_DEV_TO_MEM) {
> > > > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > > > +                       vd = vchan_next_desc(&c->vc);
> > > > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > > > +                       mtk_uart_apdma_start_rx(c);
> > > > > +               }
> > > > > +       } else if (c->cfg.direction == DMA_MEM_TO_DEV) {
> > > > > +               if (vchan_issue_pending(&c->vc) && !c->desc) {
> > > > > +                       vd = vchan_next_desc(&c->vc);
> > > > > +                       c->desc = to_mtk_uart_apdma_desc(&vd->tx);
> > > > > +                       mtk_uart_apdma_start_tx(c);
> > > > > +               }
> > > > > +       }
> > > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
> > > > > +                               struct dma_slave_config *cfg)
> > > > > +{
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       struct mtk_uart_apdmadev *mtkd =
> > > > > +                               to_mtk_uart_apdma_dev(c->vc.chan.device);
> > > > > +
> > > > > +       c->cfg = *cfg;
> > > > > +
> > > > > +       if (cfg->direction == DMA_DEV_TO_MEM) {
> > > > > +               unsigned int rx_len = cfg->src_addr_width * 1024;
> > > > > +
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->src_addr);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, rx_len);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_RX_THRE(rx_len));
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_EN, VFF_RX_INT_EN0_B
> > > > > +                               | VFF_RX_INT_EN1_B);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_RPT, 0);
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > > > +       } else if (cfg->direction == DMA_MEM_TO_DEV)    {
> > > > > +               unsigned int tx_len = cfg->dst_addr_width * 1024;
> > > > > +
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_ADDR, cfg->dst_addr);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_LEN, tx_len);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_THRE, VFF_TX_THRE(tx_len));
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_WPT, 0);
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_EN, VFF_EN_B);
> > > > > +       }
> > > > > +
> > > > > +       if (mtkd->support_33bits)
> > > > > +               mtk_uart_apdma_chan_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_B);
> > > > > +
> > > > > +       if (mtk_uart_apdma_chan_read(c, VFF_EN) != VFF_EN_B) {
> > > > > +               dev_err(chan->device->dev,
> > > > > +                       "config dma dir[%d] fail\n", cfg->direction);
> > > > > +               return -EINVAL;
> > > > > +       }
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
> > > > > +{
> > > > > +       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
> > > > > +       unsigned long flags;
> > > > > +       u32 status;
> > > > > +       int ret;
> > > > > +
> > > > > +       spin_lock_irqsave(&c->vc.lock, flags);
> > > > > +
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_FLUSH, VFF_FLUSH_CLR_B);
> > > > > +       /* Wait for flush */
> > > > > +       ret = readx_poll_timeout(readl,
> > > > > +                                c->base + VFF_FLUSH,
> > > > > +                                status,
> > > > > +                                (status & VFF_FLUSH_B) != VFF_FLUSH_B,
> > > > > +                                10, 100);
> > > > > +       if (ret)
> > > > > +               dev_err(c->vc.chan.device->dev,
> > > > > +                       "dma stop: polling FLUSH fail, DEBUG=0x%x\n",
> > > > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > > > +
> > > > > +       /*set stop as 1 -> wait until en is 0 -> set stop as 0*/
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_B);
> > > > > +       ret = readx_poll_timeout(readl,
> > > > > +                                c->base + VFF_EN,
> > > > > +                                status, status == 0, 10, 100);
> > > > > +       if (ret)
> > > > > +               dev_err(c->vc.chan.device->dev,
> > > > > +                       "dma stop: polling VFF_EN fail, DEBUG=0x%x\n",
> > > > > +                       mtk_uart_apdma_chan_read(c, VFF_DEBUG_STATUS));
> > > > > +
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_STOP, VFF_STOP_CLR_B);
> > > > > +       mtk_uart_apdma_chan_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
> > > > > +
> > > > > +       switch (c->cfg.direction) {
> > > > > +       case DMA_DEV_TO_MEM:
> > > >
> > > > Ditto, if/else?
> > > >
> > > ditto.
> > > http://lists.infradead.org/pipermail/linux-mediatek/2018-December/016292.html
> >
> > Well, I disagree, especially in this case: We turn 4 lines of code into 10...
> >
>
> I see, I will use if/else.

To clarify, that's my opinion ,-) Down the line, it's up to the maintainer.

> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_RX_INT_FLAG_CLR_B);
> > > > > +               break;
> > > > > +       case DMA_MEM_TO_DEV:
> > > > > +               mtk_uart_apdma_chan_write(c,
> > > > > +                               VFF_INT_FLAG, VFF_TX_INT_FLAG_CLR_B);
> > > > > +               break;
> > > > > +       default:
> > > > > +               break;
> > > > > +       }
> > > > > +
> > > > > +       spin_unlock_irqrestore(&c->vc.lock, flags);
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> > > > > +{
> > > > > +       /* just for check caps pass */
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> > > > > +{
> > > > > +       /* just for check caps pass */
> > > > > +       return 0;
> > > > > +}
> >
> > Why do you need these? Seems wrong to advertise device_pause/resume
> > (and the caps) if we don't actually support that?
> >
>
> In serial's 8250_dma.c, when the DMA channel is requested, it calls
> 'dma_get_slave_caps' to get the caps and then checks caps.cmd_pause;
> that pause corresponds to device_pause. Our device can't support these
> functions, but to pass that check we need to add the stub functions.
> Or you can suggest a better approach. Thanks.

Well, then I guess this is a hack... I'm not too familiar with
8250_dma.c, but I guess that if it requires pause/resume to be
implemented, then we should implement them, not just stub them out...

Is this something that can be implemented with 8250_mtk DMA?

Also, I don't see where you need resume, so maybe implementing pause is enough?

> > > > > +
> > > > > +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> > > > > +{
> > > > > +       while (list_empty(&mtkd->ddev.channels) == 0) {
> > > > > +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> > > > > +                       struct mtk_chan, vc.chan.device_node);
> > > > > +
> > > > > +               list_del(&c->vc.chan.device_node);
> > > > > +               tasklet_kill(&c->vc.task);
> > > > > +       }
> > > > > +}
> > > > > +
> > > > > +static const struct of_device_id mtk_uart_apdma_match[] = {
> > > > > +       { .compatible = "mediatek,mt6577-uart-dma", },
> > > > > +       { /* sentinel */ },
> > > > > +};
> > > > > +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> > > > > +
> > > > > +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd;
> > > > > +       struct resource *res;
> > > > > +       struct mtk_chan *c;
> > > > > +       unsigned int i;
> > > > > +       int rc;
> > > > > +
> > > > > +       mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
> > > > > +       if (!mtkd)
> > > > > +               return -ENOMEM;
> > > > > +
> > > > > +       mtkd->clk = devm_clk_get(&pdev->dev, NULL);
> > > > > +       if (IS_ERR(mtkd->clk)) {
> > > > > +               dev_err(&pdev->dev, "No clock specified\n");
> > > > > +               rc = PTR_ERR(mtkd->clk);
> > > > > +               goto err_no_dma;
> > > > > +       }
> > > > > +
> > > > > +       if (of_property_read_bool(pdev->dev.of_node, "dma-33bits")) {
> > > > > +               dev_info(&pdev->dev, "Support dma 33bits\n");
> > > >
> > > > Drop this, a bit verbose.
> > > >
> > > > > +               mtkd->support_33bits = true;
> > > > > +       }
> > > > > +
> > > > > +       rc = dma_set_mask_and_coherent(&pdev->dev,
> > > > > +                               DMA_BIT_MASK(32 | mtkd->support_33bits));
> > > > > +       if (rc)
> > > > > +               goto err_no_dma;
> > > > > +
> > > > > +       dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
> > > > > +       mtkd->ddev.device_alloc_chan_resources =
> > > > > +                               mtk_uart_apdma_alloc_chan_resources;
> > > > > +       mtkd->ddev.device_free_chan_resources =
> > > > > +                               mtk_uart_apdma_free_chan_resources;
> > > > > +       mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
> > > > > +       mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
> > > > > +       mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
> > > > > +       mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
> > > > > +       mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
> > > > > +       mtkd->ddev.device_resume = mtk_uart_apdma_device_resume;
> > > > > +       mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
> > > > > +       mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > > > +       mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
> > > > > +       mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> > > > > +       mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
> > > > > +       mtkd->ddev.dev = &pdev->dev;
> > > > > +       INIT_LIST_HEAD(&mtkd->ddev.channels);
> > > > > +
> > > > > +       for (i = 0; i < MTK_UART_APDMA_CHANNELS; i++) {
> > > > > +               c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
> > > > > +               if (!c) {
> > > > > +                       rc = -ENODEV;
> > > > > +                       goto err_no_dma;
> > > > > +               }
> > > > > +
> > > > > +               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
> > > > > +               if (!res) {
> > > > > +                       rc = -ENODEV;
> > > > > +                       goto err_no_dma;
> > > > > +               }
> > > > > +
> > > > > +               c->base = devm_ioremap_resource(&pdev->dev, res);
> > > > > +               if (IS_ERR(c->base)) {
> > > > > +                       rc = PTR_ERR(c->base);
> > > > > +                       goto err_no_dma;
> > > > > +               }
> > > > > +               c->requested = false;
> > > > > +               c->vc.desc_free = mtk_uart_apdma_desc_free;
> > > > > +               vchan_init(&c->vc, &mtkd->ddev);
> > > > > +
> > > > > +               mtkd->dma_irq[i] = platform_get_irq(pdev, i);
> > > > > +               if ((int)mtkd->dma_irq[i] < 0) {
> > > > > +                       dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
> > > > > +                       rc = -EINVAL;
> > > > > +                       goto err_no_dma;
> > > > > +               }
> > > > > +       }
> > > > > +
> > > > > +       pm_runtime_enable(&pdev->dev);
> > > > > +       pm_runtime_set_active(&pdev->dev);
> > > > > +
> > > > > +       rc = dma_async_device_register(&mtkd->ddev);
> > > > > +       if (rc)
> > > > > +               goto rpm_disable;
> > > > > +
> > > > > +       platform_set_drvdata(pdev, mtkd);
> > > > > +
> > > > > +       if (pdev->dev.of_node) {
> > > > > +               /* Device-tree DMA controller registration */
> > > > > +               rc = of_dma_controller_register(pdev->dev.of_node,
> > > > > +                                               of_dma_xlate_by_chan_id,
> > > > > +                                               mtkd);
> > > > > +               if (rc)
> > > > > +                       goto dma_remove;
> > > > > +       }
> > > > > +
> > > > > +       return rc;
> > > > > +
> > > > > +dma_remove:
> > > > > +       dma_async_device_unregister(&mtkd->ddev);
> > > > > +rpm_disable:
> > > > > +       pm_runtime_disable(&pdev->dev);
> > > > > +err_no_dma:
> > > > > +       mtk_uart_apdma_free(mtkd);
> >
> > It's not very correct to call this before
> > INIT_LIST_HEAD(&mtkd->ddev.channels);
> >
> > I'd just call `return rc;` directly for all instances before that line.
> >
>
> OK, I will modify it.
>
> > > > > +       return rc;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_remove(struct platform_device *pdev)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);
> > > > > +
> > > > > +       if (pdev->dev.of_node)
> > > > > +               of_dma_controller_free(pdev->dev.of_node);
> > > > > +
> > > > > +       pm_runtime_disable(&pdev->dev);
> > > > > +       pm_runtime_put_noidle(&pdev->dev);
> > > > > +
> > > > > +       dma_async_device_unregister(&mtkd->ddev);
> > > > > +       mtk_uart_apdma_free(mtkd);
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +#ifdef CONFIG_PM_SLEEP
> > > > > +static int mtk_uart_apdma_suspend(struct device *dev)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > > +
> > > > > +       if (!pm_runtime_suspended(dev))
> > > > > +               clk_disable_unprepare(mtkd->clk);
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_resume(struct device *dev)
> > > > > +{
> > > > > +       int ret;
> > > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > > +
> > > > > +       if (!pm_runtime_suspended(dev)) {
> > > > > +               ret = clk_prepare_enable(mtkd->clk);
> > > > > +               if (ret)
> > > > > +                       return ret;
> > > > > +       }
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +#endif /* CONFIG_PM_SLEEP */
> > > > > +
> > > > > +#ifdef CONFIG_PM
> > > > > +static int mtk_uart_apdma_runtime_suspend(struct device *dev)
> > > > > +{
> > > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > > +
> > > > > +       clk_disable_unprepare(mtkd->clk);
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static int mtk_uart_apdma_runtime_resume(struct device *dev)
> > > > > +{
> > > > > +       int ret;
> > > > > +       struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
> > > > > +
> > > > > +       ret = clk_prepare_enable(mtkd->clk);
> > > > > +       if (ret)
> > > > > +               return ret;
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +#endif /* CONFIG_PM */
> > > > > +
> > > > > +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
> > > > > +       SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
> > > > > +       SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
> > > > > +                          mtk_uart_apdma_runtime_resume, NULL)
> > > > > +};
> > > > > +
> > > > > +static struct platform_driver mtk_uart_apdma_driver = {
> > > > > +       .probe  = mtk_uart_apdma_probe,
> > > > > +       .remove = mtk_uart_apdma_remove,
> > > > > +       .driver = {
> > > > > +               .name           = KBUILD_MODNAME,
> > > > > +               .pm             = &mtk_uart_apdma_pm_ops,
> > > > > +               .of_match_table = of_match_ptr(mtk_uart_apdma_match),
> > > > > +       },
> > > > > +};
> > > > > +
> > > > > +module_platform_driver(mtk_uart_apdma_driver);
> > > > > +
> > > > > +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
> > > > > +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
> > > > > +MODULE_LICENSE("GPL v2");
> > > > > +
> > > > > diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
> > > > > index 27bac0b..1a523c87 100644
> > > > > --- a/drivers/dma/mediatek/Kconfig
> > > > > +++ b/drivers/dma/mediatek/Kconfig
> > > > > @@ -1,4 +1,15 @@
> > > > >
> > > > > +config DMA_MTK_UART
> > > > > +       tristate "MediaTek SoCs APDMA support for UART"
> > > > > +       depends on OF && SERIAL_8250_MT6577
> > > > > +       select DMA_ENGINE
> > > > > +       select DMA_VIRTUAL_CHANNELS
> > > > > +       help
> > > > > +         Support for the UART DMA engine found on MediaTek MTK SoCs.
> > > > > +         when SERIAL_8250_MT6577 is enabled, and if you want to use DMA,
> > > > > +         you can enable the config. the DMA engine can only be used
> > > > > +         with MediaTek SoCs.
> > > > > +
> > > > >  config MTK_HSDMA
> > > > >         tristate "MediaTek High-Speed DMA controller support"
> > > > >         depends on ARCH_MEDIATEK || COMPILE_TEST
> > > > > diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
> > > > > index 6e778f8..2f2efd9 100644
> > > > > --- a/drivers/dma/mediatek/Makefile
> > > > > +++ b/drivers/dma/mediatek/Makefile
> > > > > @@ -1 +1,2 @@
> > > > > +obj-$(CONFIG_DMA_MTK_UART) += 8250_mtk_dma.o
> > > > >  obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
> > > > > --
> > > > > 1.7.9.5
> > > > >
> > >
> > >
>
>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support
  2018-12-26  9:28           ` Nicolas Boichat
@ 2018-12-26 10:18             ` Long Cheng
  0 siblings, 0 replies; 9+ messages in thread
From: Long Cheng @ 2018-12-26 10:18 UTC (permalink / raw)
  To: Nicolas Boichat
  Cc: Vinod Koul, Randy Dunlap, Rob Herring, Mark Rutland, Ryder Lee,
	Sean Wang, Matthias Brugger, Dan Williams, Greg Kroah-Hartman,
	Jiri Slaby, Sean Wang, dmaengine, devicetree,
	linux-arm Mailing List, linux-mediatek, lkml, linux-serial,
	srv_heupstream, Yingjoe Chen, YT Shen, Zhenbao Liu

On Wed, 2018-12-26 at 17:28 +0800, Nicolas Boichat wrote:

......

> > > > > > +static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
> > > > > > +{
> > > > > > +       /* just for check caps pass */
> > > > > > +       return 0;
> > > > > > +}
> > > > > > +
> > > > > > +static int mtk_uart_apdma_device_resume(struct dma_chan *chan)
> > > > > > +{
> > > > > > +       /* just for check caps pass */
> > > > > > +       return 0;
> > > > > > +}
> > >
> > > Why do you need these? Seems wrong to advertise device_pause/resume
> > > (and the caps) if we don't actually support that?
> > >
> >
> > In serial's 8250_dma.c, when the DMA channel is requested, it calls
> > 'dma_get_slave_caps' to get the caps and then checks caps.cmd_pause;
> > that pause corresponds to device_pause. Our device can't support these
> > functions, but to pass that check we need to add the stub functions.
> > Or you can suggest a better approach. Thanks.
> 
> Well, then I guess this is a hack... I'm not too familiar with
> 8250_dma.c, but I guess that if it requires pause/resume to be
> implemented, then we should implement them, not just stub them out...
> 
> Is this something that can be implemented with 8250_mtk DMA?
> 

The HW can't support it.

> Also, I don't see where you need resume, so maybe implementing pause is enough?
> 

Yes, pause is enough; the resume function was added for symmetry.
Anyway, the pause function needs to be kept.

> > > > > > +
> > > > > > +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
> > > > > > +{
> > > > > > +       while (list_empty(&mtkd->ddev.channels) == 0) {
> > > > > > +               struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
> > > > > > +                       struct mtk_chan, vc.chan.device_node);
> > > > > > +
> > > > > > +               list_del(&c->vc.chan.device_node);
> > > > > > +               tasklet_kill(&c->vc.task);
> > > > > > +       }
> > > > > > +}
> > > > > > +
> > > > > > +static const struct of_device_id mtk_uart_apdma_match[] = {
> > > > > > +       { .compatible = "mediatek,mt6577-uart-dma", },
> > > > > > +       { /* sentinel */ },
> > > > > > +};
> > > > > > +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);
> > > > > > +
> > > > > > +static int mtk_uart_apdma_probe(struct platform_device *pdev)
> > > > > > +{
> > > > > > +       struct mtk_uart_apdmadev *mtkd;
> > > > > > +       struct resource *res;
> > > > > > +       struct mtk_chan *c;
> > > > > > +       unsigned int i;
> > > > > > +       int rc;
> > > > > > +

......



^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2018-12-26 10:19 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-25  1:26 [PATCH v7 0/2] add uart DMA function Long Cheng
2018-12-25  1:26 ` [PATCH v7 1/2] dmaengine: 8250_mtk_dma: add MediaTek uart DMA support Long Cheng
2018-12-25  7:16   ` Nicolas Boichat
2018-12-25 12:06     ` Long Cheng
2018-12-26  0:05       ` Nicolas Boichat
2018-12-26  6:35         ` Long Cheng
2018-12-26  9:28           ` Nicolas Boichat
2018-12-26 10:18             ` Long Cheng
2018-12-25  1:26 ` [PATCH v7 2/2] arm: dts: mt2712: add uart APDMA to device tree Long Cheng
