From: mgross@linux.intel.com
To: markgross@kernel.org, mgross@linux.intel.com, arnd@arndb.de,
	bp@suse.de, damien.lemoal@wdc.com, dragan.cvetic@xilinx.com,
	gregkh@linuxfoundation.org, corbet@lwn.net,
	palmerdabbelt@google.com, paul.walmsley@sifive.com,
	peng.fan@nxp.com, robh+dt@kernel.org, shawnguo@kernel.org,
	jassisinghbrar@gmail.com
Cc: linux-kernel@vger.kernel.org,
	Srikanth Thokala <srikanth.thokala@intel.com>
Subject: [PATCH v4 10/34] misc: xlink-pcie: lh: Add PCIe EP DMA functionality
Date: Fri, 29 Jan 2021 18:21:00 -0800
Message-ID: <20210130022124.65083-46-mgross@linux.intel.com>
In-Reply-To: <20210130022124.65083-1-mgross@linux.intel.com>

From: Srikanth Thokala <srikanth.thokala@intel.com>

Add Synopsys PCIe DWC core embedded-DMA (eDMA) functionality for the
local host. The endpoint drives the eDMA read and write engines in
linked-list mode to move data between local memory and the remote host.
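
For reference, callers elsewhere in this series are expected to drive
these helpers roughly as follows (a minimal sketch; the epf pointer and
the chan/descs_num values are assumptions, while the function names and
the DMA_CHAN_NUM/XPCIE_NUM_TX_DESCS limits come from this patch):

	/* One-time setup, e.g. from the EPF bind callback. */
	rc = intel_xpcie_ep_dma_init(epf);
	if (rc)
		return rc;

	/*
	 * After the caller fills the first descs_num entries of the
	 * channel's TX descriptor ring, this rings the write doorbell
	 * and polls for completion (local memory -> remote host).
	 */
	rc = intel_xpcie_ep_dma_write_ll(epf, chan, descs_num);

	/* Teardown mirrors setup. */
	intel_xpcie_ep_dma_uninit(epf);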

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Srikanth Thokala <srikanth.thokala@intel.com>
---
 drivers/misc/xlink-pcie/local_host/Makefile |   1 +
 drivers/misc/xlink-pcie/local_host/dma.c    | 575 ++++++++++++++++++++
 drivers/misc/xlink-pcie/local_host/epf.c    |  15 +-
 drivers/misc/xlink-pcie/local_host/epf.h    |  41 ++
 4 files changed, 629 insertions(+), 3 deletions(-)
 create mode 100644 drivers/misc/xlink-pcie/local_host/dma.c

diff --git a/drivers/misc/xlink-pcie/local_host/Makefile b/drivers/misc/xlink-pcie/local_host/Makefile
index 514d3f0c91bc..54fc118e2dd1 100644
--- a/drivers/misc/xlink-pcie/local_host/Makefile
+++ b/drivers/misc/xlink-pcie/local_host/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_XLINK_PCIE_LH_DRIVER) += mxlk_ep.o
 mxlk_ep-objs := epf.o
+mxlk_ep-objs += dma.o
diff --git a/drivers/misc/xlink-pcie/local_host/dma.c b/drivers/misc/xlink-pcie/local_host/dma.c
new file mode 100644
index 000000000000..42978fb0db49
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/dma.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+
+#include "epf.h"
+
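+/* Offset of the DWC eDMA register block within the DBI address space. */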
+#define DMA_DBI_OFFSET			(0x380000)
+
+/* PCIe DMA control 1 register definitions. */
+#define DMA_CH_CONTROL1_CB_SHIFT	(0)
+#define DMA_CH_CONTROL1_TCB_SHIFT	(1)
+#define DMA_CH_CONTROL1_LLP_SHIFT	(2)
+#define DMA_CH_CONTROL1_LIE_SHIFT	(3)
+#define DMA_CH_CONTROL1_CS_SHIFT	(5)
+#define DMA_CH_CONTROL1_CCS_SHIFT	(8)
+#define DMA_CH_CONTROL1_LLE_SHIFT	(9)
+#define DMA_CH_CONTROL1_CB_MASK		(BIT(DMA_CH_CONTROL1_CB_SHIFT))
+#define DMA_CH_CONTROL1_TCB_MASK	(BIT(DMA_CH_CONTROL1_TCB_SHIFT))
+#define DMA_CH_CONTROL1_LLP_MASK	(BIT(DMA_CH_CONTROL1_LLP_SHIFT))
+#define DMA_CH_CONTROL1_LIE_MASK	(BIT(DMA_CH_CONTROL1_LIE_SHIFT))
+#define DMA_CH_CONTROL1_CS_MASK		(0x3 << DMA_CH_CONTROL1_CS_SHIFT)
+#define DMA_CH_CONTROL1_CCS_MASK	(BIT(DMA_CH_CONTROL1_CCS_SHIFT))
+#define DMA_CH_CONTROL1_LLE_MASK	(BIT(DMA_CH_CONTROL1_LLE_SHIFT))
+
+/* DMA control 1 register Channel Status */
+#define DMA_CH_CONTROL1_CS_RUNNING	(0x1 << DMA_CH_CONTROL1_CS_SHIFT)
+#define DMA_CH_CONTROL1_CS_HALTED	(0x2 << DMA_CH_CONTROL1_CS_SHIFT)
+#define DMA_CH_CONTROL1_CS_STOPPED	(0x3 << DMA_CH_CONTROL1_CS_SHIFT)
+
+/* PCIe DMA Engine enable register definitions. */
+#define DMA_ENGINE_EN_SHIFT		(0)
+#define DMA_ENGINE_EN_MASK		(BIT(DMA_ENGINE_EN_SHIFT))
+
+/* PCIe DMA interrupt registers definitions. */
+#define DMA_ABORT_INTERRUPT_SHIFT	(16)
+#define DMA_ABORT_INTERRUPT_MASK	(0xFF << DMA_ABORT_INTERRUPT_SHIFT)
+#define DMA_ABORT_INTERRUPT_CH_MASK(_c) (BIT(_c) << DMA_ABORT_INTERRUPT_SHIFT)
+#define DMA_DONE_INTERRUPT_MASK		(0xFF)
+#define DMA_DONE_INTERRUPT_CH_MASK(_c)	(BIT(_c))
+#define DMA_ALL_INTERRUPT_MASK \
+	(DMA_ABORT_INTERRUPT_MASK | DMA_DONE_INTERRUPT_MASK)
+
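+/* PCIe DMA error status register definitions. */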
+#define DMA_LL_ERROR_SHIFT		(16)
+#define DMA_CPL_ABORT_SHIFT		(8)
+#define DMA_CPL_TIMEOUT_SHIFT		(16)
+#define DMA_DATA_POI_SHIFT		(24)
+#define DMA_AR_ERROR_CH_MASK(_c)	(BIT(_c))
+#define DMA_LL_ERROR_CH_MASK(_c)	(BIT(_c) << DMA_LL_ERROR_SHIFT)
+#define DMA_UNREQ_ERROR_CH_MASK(_c)	(BIT(_c))
+#define DMA_CPL_ABORT_ERROR_CH_MASK(_c)	(BIT(_c) << DMA_CPL_ABORT_SHIFT)
+#define DMA_CPL_TIMEOUT_ERROR_CH_MASK(_c) (BIT(_c) << DMA_CPL_TIMEOUT_SHIFT)
+#define DMA_DATA_POI_ERROR_CH_MASK(_c)	(BIT(_c) << DMA_DATA_POI_SHIFT)
+
+#define DMA_LLLAIE_SHIFT		(16)
+#define DMA_LLLAIE_MASK			(0xF << DMA_LLLAIE_SHIFT)
+
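+/* PCIe DMA channel arbitration weight definitions. */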
+#define DMA_CHAN_WRITE_MAX_WEIGHT	(0x7)
+#define DMA_CHAN_READ_MAX_WEIGHT	(0x3)
+#define DMA_CHAN0_WEIGHT_OFFSET		(0)
+#define DMA_CHAN1_WEIGHT_OFFSET		(5)
+#define DMA_CHAN2_WEIGHT_OFFSET		(10)
+#define DMA_CHAN3_WEIGHT_OFFSET		(15)
+#define DMA_CHAN_WRITE_ALL_MAX_WEIGHT					\
+	((DMA_CHAN_WRITE_MAX_WEIGHT << DMA_CHAN0_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_WRITE_MAX_WEIGHT << DMA_CHAN1_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_WRITE_MAX_WEIGHT << DMA_CHAN2_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_WRITE_MAX_WEIGHT << DMA_CHAN3_WEIGHT_OFFSET))
+#define DMA_CHAN_READ_ALL_MAX_WEIGHT					\
+	((DMA_CHAN_READ_MAX_WEIGHT << DMA_CHAN0_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_READ_MAX_WEIGHT << DMA_CHAN1_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_READ_MAX_WEIGHT << DMA_CHAN2_WEIGHT_OFFSET) |	\
+	 (DMA_CHAN_READ_MAX_WEIGHT << DMA_CHAN3_WEIGHT_OFFSET))
+
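+/* APB registers used to keep the link out of L1 while ringing doorbells. */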
+#define PCIE_REGS_PCIE_APP_CNTRL	0x8
+#define APP_XFER_PENDING		BIT(6)
+#define PCIE_REGS_PCIE_SII_PM_STATE_1	0xb4
+#define PM_LINKST_IN_L1			BIT(10)
+
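+/* Iteration limits for the polling loops below. */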
+#define DMA_POLLING_TIMEOUT		1000000
+#define DMA_ENABLE_TIMEOUT		1000
+#define DMA_PCIE_PM_L1_TIMEOUT		20
+
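+/* DWC eDMA global (non-channel) registers, starting at the DMA block base. */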
+struct pcie_dma_reg {
+	u32 dma_ctrl_data_arb_prior;
+	u32 reserved1;
+	u32 dma_ctrl;
+	u32 dma_write_engine_en;
+	u32 dma_write_doorbell;
+	u32 reserved2;
+	u32 dma_write_channel_arb_weight_low;
+	u32 dma_write_channel_arb_weight_high;
+	u32 reserved3[3];
+	u32 dma_read_engine_en;
+	u32 dma_read_doorbell;
+	u32 reserved4;
+	u32 dma_read_channel_arb_weight_low;
+	u32 dma_read_channel_arb_weight_high;
+	u32 reserved5[3];
+	u32 dma_write_int_status;
+	u32 reserved6;
+	u32 dma_write_int_mask;
+	u32 dma_write_int_clear;
+	u32 dma_write_err_status;
+	u32 dma_write_done_imwr_low;
+	u32 dma_write_done_imwr_high;
+	u32 dma_write_abort_imwr_low;
+	u32 dma_write_abort_imwr_high;
+	u16 dma_write_ch_imwr_data[8];
+	u32 reserved7[4];
+	u32 dma_write_linked_list_err_en;
+	u32 reserved8[3];
+	u32 dma_read_int_status;
+	u32 reserved9;
+	u32 dma_read_int_mask;
+	u32 dma_read_int_clear;
+	u32 reserved10;
+	u32 dma_read_err_status_low;
+	u32 dma_read_err_status_high;
+	u32 reserved11[2];
+	u32 dma_read_linked_list_err_en;
+	u32 reserved12;
+	u32 dma_read_done_imwr_low;
+	u32 dma_read_done_imwr_high;
+	u32 dma_read_abort_imwr_low;
+	u32 dma_read_abort_imwr_high;
+	u16 dma_read_ch_imwr_data[8];
+} __packed;
+
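+/* Per-channel eDMA registers; one block per channel at dma_chan_offset[][]. */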
+struct pcie_dma_chan {
+	u32 dma_ch_control1;
+	u32 reserved1;
+	u32 dma_transfer_size;
+	u32 dma_sar_low;
+	u32 dma_sar_high;
+	u32 dma_dar_low;
+	u32 dma_dar_high;
+	u32 dma_llp_low;
+	u32 dma_llp_high;
+} __packed;
+
+enum xpcie_ep_engine_type {
+	WRITE_ENGINE,
+	READ_ENGINE
+};
+
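+/* Channel register block offsets, indexed by engine type and channel. */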
+static const u32 dma_chan_offset[2][DMA_CHAN_NUM] = {
+	{ 0x200, 0x400, 0x600, 0x800 },
+	{ 0x300, 0x500, 0x700, 0x900 }
+};
+
+static void __iomem *intel_xpcie_ep_get_dma_base(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+
+	return xpcie_epf->dbi_base + DMA_DBI_OFFSET;
+}
+
+static int intel_xpcie_ep_dma_disable(void __iomem *dma_base,
+				      enum xpcie_ep_engine_type rw)
+{
+	struct pcie_dma_reg __iomem *dma_reg = dma_base;
+	void __iomem *int_mask, *int_clear;
+	void __iomem *engine_en, *ll_err;
+	int i;
+
+	if (rw == WRITE_ENGINE) {
+		engine_en = (void __iomem *)&dma_reg->dma_write_engine_en;
+		int_mask = (void __iomem *)&dma_reg->dma_write_int_mask;
+		int_clear = (void __iomem *)&dma_reg->dma_write_int_clear;
+		ll_err = (void __iomem *)&dma_reg->dma_write_linked_list_err_en;
+	} else {
+		engine_en = (void __iomem *)&dma_reg->dma_read_engine_en;
+		int_mask = (void __iomem *)&dma_reg->dma_read_int_mask;
+		int_clear = (void __iomem *)&dma_reg->dma_read_int_clear;
+		ll_err = (void __iomem *)&dma_reg->dma_read_linked_list_err_en;
+	}
+
+	iowrite32(0x0, engine_en);
+
+	/* Mask all interrupts. */
+	iowrite32(DMA_ALL_INTERRUPT_MASK, int_mask);
+
+	/* Clear all interrupts. */
+	iowrite32(DMA_ALL_INTERRUPT_MASK, int_clear);
+
+	/* Disable LL abort interrupt (LLLAIE). */
+	iowrite32(0, ll_err);
+
+	/* Wait until the engine is disabled. */
+	for (i = 0; i < DMA_ENABLE_TIMEOUT; i++) {
+		if (!(ioread32(engine_en) & DMA_ENGINE_EN_MASK))
+			return 0;
+		msleep(20);
+	}
+
+	return -EBUSY;
+}
+
+static void intel_xpcie_ep_dma_enable(void __iomem *dma_base,
+				      enum xpcie_ep_engine_type rw)
+{
+	struct pcie_dma_reg __iomem *dma_reg = dma_base;
+	void __iomem *engine_en, *ll_err, *arb_weight;
+	struct pcie_dma_chan __iomem *dma_chan;
+	void __iomem *int_mask, *int_clear;
+	u32 offset, weight;
+	int i;
+
+	if (rw == WRITE_ENGINE) {
+		engine_en = (void __iomem *)&dma_reg->dma_write_engine_en;
+		int_mask = (void __iomem *)&dma_reg->dma_write_int_mask;
+		int_clear = (void __iomem *)&dma_reg->dma_write_int_clear;
+		ll_err = (void __iomem *)&dma_reg->dma_write_linked_list_err_en;
+		arb_weight = (void __iomem *)
+			     &dma_reg->dma_write_channel_arb_weight_low;
+		weight = DMA_CHAN_WRITE_ALL_MAX_WEIGHT;
+	} else {
+		engine_en = (void __iomem *)&dma_reg->dma_read_engine_en;
+		int_mask = (void __iomem *)&dma_reg->dma_read_int_mask;
+		int_clear = (void __iomem *)&dma_reg->dma_read_int_clear;
+		ll_err = (void __iomem *)&dma_reg->dma_read_linked_list_err_en;
+		arb_weight = (void __iomem *)
+			     &dma_reg->dma_read_channel_arb_weight_low;
+		weight = DMA_CHAN_READ_ALL_MAX_WEIGHT;
+	}
+
+	iowrite32(DMA_ENGINE_EN_MASK, engine_en);
+
+	/* Unmask all interrupts so the interrupt line can be asserted. */
+	iowrite32(~(u32)DMA_ALL_INTERRUPT_MASK, int_mask);
+
+	/* Clear all interrupts. */
+	iowrite32(DMA_ALL_INTERRUPT_MASK, int_clear);
+
+	/* Set channel round robin weight. */
+	iowrite32(weight, arb_weight);
+
+	/* Enable LL abort interrupt (LLLAIE). */
+	iowrite32(DMA_LLLAIE_MASK, ll_err);
+
+	/* Enable linked list mode. */
+	for (i = 0; i < DMA_CHAN_NUM; i++) {
+		offset = dma_chan_offset[rw][i];
+		dma_chan = dma_base + offset;
+		iowrite32(DMA_CH_CONTROL1_LLE_MASK,
+			  (void __iomem *)&dma_chan->dma_ch_control1);
+	}
+}
+
+/*
+ * Make sure the EP is not in the L1 state when ringing the DMA doorbell.
+ * The DMA controller may start the wrong channel if the doorbell is rung
+ * while the controller is transitioning to L1.
+ */
+static int intel_xpcie_ep_dma_doorbell(struct xpcie_epf *xpcie_epf, int chan,
+				       void __iomem *doorbell)
+{
+	int i = DMA_PCIE_PM_L1_TIMEOUT, rc = 0;
+	u32 val, pm_val;
+
+	val = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+	iowrite32(val | APP_XFER_PENDING,
+		  xpcie_epf->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+	pm_val = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_SII_PM_STATE_1);
+	while (pm_val & PM_LINKST_IN_L1) {
+		if (i-- < 0) {
+			rc = -ETIME;
+			break;
+		}
+		udelay(5);
+		pm_val = ioread32(xpcie_epf->apb_base +
+				  PCIE_REGS_PCIE_SII_PM_STATE_1);
+	}
+
+	iowrite32((u32)chan, doorbell);
+
+	iowrite32(val & ~APP_XFER_PENDING,
+		  xpcie_epf->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+
+	return rc;
+}
+
+static int intel_xpcie_ep_dma_err_status(void __iomem *err_status, int chan)
+{
+	if (ioread32(err_status) &
+	    (DMA_AR_ERROR_CH_MASK(chan) | DMA_LL_ERROR_CH_MASK(chan)))
+		return -EIO;
+
+	return 0;
+}
+
+static int intel_xpcie_ep_dma_rd_err_sts_h(void __iomem *err_status,
+					   int chan)
+{
+	if (ioread32(err_status) &
+	    (DMA_UNREQ_ERROR_CH_MASK(chan) |
+	     DMA_CPL_ABORT_ERROR_CH_MASK(chan) |
+	     DMA_CPL_TIMEOUT_ERROR_CH_MASK(chan) |
+	     DMA_DATA_POI_ERROR_CH_MASK(chan)))
+		return -EIO;
+
+	return 0;
+}
+
+static void
+intel_xpcie_ep_dma_setup_ll_descs(struct pcie_dma_chan __iomem *dma_chan,
+				  struct xpcie_dma_ll_desc_buf *desc_buf,
+				  int descs_num)
+{
+	struct xpcie_dma_ll_desc *descs = desc_buf->virt;
+	int i;
+
+	/* Set up the data descriptors; flag the last one to raise LIE. */
+	for (i = 0; i < descs_num - 1; i++)
+		descs[i].dma_ch_control1 = DMA_CH_CONTROL1_CB_MASK;
+	descs[descs_num - 1].dma_ch_control1 = DMA_CH_CONTROL1_LIE_MASK |
+						DMA_CH_CONTROL1_CB_MASK;
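+	/* Terminate with a link element that points back to the list head. */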
+	descs[descs_num].dma_ch_control1 = DMA_CH_CONTROL1_LLP_MASK |
+					   DMA_CH_CONTROL1_TCB_MASK;
+	descs[descs_num].src_addr = (phys_addr_t)desc_buf->phys;
+
+	/* Put the channel in linked-list mode and program the list address. */
+	iowrite32(DMA_CH_CONTROL1_LLE_MASK | DMA_CH_CONTROL1_CCS_MASK,
+		  (void __iomem *)&dma_chan->dma_ch_control1);
+	iowrite32(lower_32_bits(desc_buf->phys),
+		  (void __iomem *)&dma_chan->dma_llp_low);
+	iowrite32(upper_32_bits(desc_buf->phys),
+		  (void __iomem *)&dma_chan->dma_llp_high);
+}
+
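+/*
+ * Run a linked-list DMA write (local memory to remote host) on @chan,
+ * using the first @descs_num descriptors of the channel's TX ring, and
+ * poll until the transfer completes or times out.
+ */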
+int intel_xpcie_ep_dma_write_ll(struct pci_epf *epf, int chan, int descs_num)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	void __iomem *dma_base = xpcie_epf->dma_base;
+	struct pcie_dma_chan __iomem *dma_chan;
+	struct xpcie_dma_ll_desc_buf *desc_buf;
+	struct pcie_dma_reg __iomem *dma_reg = dma_base;
+	int i, rc;
+
+	if (descs_num <= 0 || descs_num > XPCIE_NUM_TX_DESCS)
+		return -EINVAL;
+
+	if (chan < 0 || chan >= DMA_CHAN_NUM)
+		return -EINVAL;
+
+	dma_chan = dma_base + dma_chan_offset[WRITE_ENGINE][chan];
+
+	desc_buf = &xpcie_epf->tx_desc_buf[chan];
+
+	intel_xpcie_ep_dma_setup_ll_descs(dma_chan, desc_buf, descs_num);
+
+	/* Start DMA transfer. */
+	rc = intel_xpcie_ep_dma_doorbell(xpcie_epf, chan,
+					 (void __iomem *)
+					 &dma_reg->dma_write_doorbell);
+	if (rc)
+		return rc;
+
+	/* Wait for DMA transfer to complete. */
+	for (i = 0; i < DMA_POLLING_TIMEOUT; i++) {
+		usleep_range(5, 10);
+		if (ioread32((void __iomem *)&dma_reg->dma_write_int_status) &
+		    (DMA_DONE_INTERRUPT_CH_MASK(chan) |
+		     DMA_ABORT_INTERRUPT_CH_MASK(chan)))
+			break;
+	}
+	if (i == DMA_POLLING_TIMEOUT) {
+		dev_err(&xpcie_epf->epf->dev, "DMA Wr timeout\n");
+		rc = -ETIME;
+		goto cleanup;
+	}
+
+	rc = intel_xpcie_ep_dma_err_status((void __iomem *)
+					   &dma_reg->dma_write_err_status,
+					   chan);
+
+cleanup:
+	/* Clear the done/abort interrupt. */
+	iowrite32((DMA_DONE_INTERRUPT_CH_MASK(chan) |
+		   DMA_ABORT_INTERRUPT_CH_MASK(chan)),
+		  (void __iomem *)&dma_reg->dma_write_int_clear);
+
+	if (rc) {
+		if (intel_xpcie_ep_dma_disable(dma_base, WRITE_ENGINE)) {
+			dev_err(&xpcie_epf->epf->dev,
+				"failed to disable WR DMA\n");
+			return rc;
+		}
+		intel_xpcie_ep_dma_enable(dma_base, WRITE_ENGINE);
+	}
+
+	return rc;
+}
+
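+/*
+ * Run a linked-list DMA read (remote host to local memory) on @chan,
+ * using the first @descs_num descriptors of the channel's RX ring, and
+ * poll until the transfer completes or times out.
+ */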
+int intel_xpcie_ep_dma_read_ll(struct pci_epf *epf, int chan, int descs_num)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	void __iomem *dma_base = xpcie_epf->dma_base;
+	struct xpcie_dma_ll_desc_buf *desc_buf;
+	struct pcie_dma_reg __iomem *dma_reg = dma_base;
+	struct pcie_dma_chan __iomem *dma_chan;
+	int i, rc;
+
+	if (descs_num <= 0 || descs_num > XPCIE_NUM_RX_DESCS)
+		return -EINVAL;
+
+	if (chan < 0 || chan >= DMA_CHAN_NUM)
+		return -EINVAL;
+
+	dma_chan = dma_base + dma_chan_offset[READ_ENGINE][chan];
+
+	desc_buf = &xpcie_epf->rx_desc_buf[chan];
+
+	intel_xpcie_ep_dma_setup_ll_descs(dma_chan, desc_buf, descs_num);
+
+	/* Start DMA transfer. */
+	rc = intel_xpcie_ep_dma_doorbell(xpcie_epf, chan,
+					 (void __iomem *)
+					 &dma_reg->dma_read_doorbell);
+	if (rc)
+		return rc;
+
+	/* Wait for DMA transfer to complete. */
+	for (i = 0; i < DMA_POLLING_TIMEOUT; i++) {
+		usleep_range(5, 10);
+		if (ioread32((void __iomem *)&dma_reg->dma_read_int_status) &
+		    (DMA_DONE_INTERRUPT_CH_MASK(chan) |
+		     DMA_ABORT_INTERRUPT_CH_MASK(chan)))
+			break;
+	}
+	if (i == DMA_POLLING_TIMEOUT) {
+		dev_err(&xpcie_epf->epf->dev, "DMA Rd timeout\n");
+		rc = -ETIME;
+		goto cleanup;
+	}
+
+	rc = intel_xpcie_ep_dma_err_status((void __iomem *)
+					   &dma_reg->dma_read_err_status_low,
+					   chan);
+	if (!rc) {
+		rc =
+		intel_xpcie_ep_dma_rd_err_sts_h((void __iomem *)
+						&dma_reg->dma_rd_err_sts_h,
+						chan);
+	}
+cleanup:
+	/* Clear the done/abort interrupt. */
+	iowrite32((DMA_DONE_INTERRUPT_CH_MASK(chan) |
+		   DMA_ABORT_INTERRUPT_CH_MASK(chan)),
+		  (void __iomem *)&dma_reg->dma_read_int_clear);
+
+	if (rc) {
+		if (intel_xpcie_ep_dma_disable(dma_base, READ_ENGINE)) {
+			dev_err(&xpcie_epf->epf->dev,
+				"failed to disable RD DMA\n");
+			return rc;
+		}
+		intel_xpcie_ep_dma_enable(dma_base, READ_ENGINE);
+	}
+
+	return rc;
+}
+
+static void intel_xpcie_ep_dma_free_ll_descs_mem(struct xpcie_epf *xpcie_epf)
+{
+	struct device *dma_dev = xpcie_epf->epf->epc->dev.parent;
+	int i;
+
+	for (i = 0; i < DMA_CHAN_NUM; i++) {
+		if (xpcie_epf->tx_desc_buf[i].virt) {
+			dma_free_coherent(dma_dev,
+					  xpcie_epf->tx_desc_buf[i].size,
+					  xpcie_epf->tx_desc_buf[i].virt,
+					  xpcie_epf->tx_desc_buf[i].phys);
+		}
+		if (xpcie_epf->rx_desc_buf[i].virt) {
+			dma_free_coherent(dma_dev,
+					  xpcie_epf->rx_desc_buf[i].size,
+					  xpcie_epf->rx_desc_buf[i].virt,
+					  xpcie_epf->rx_desc_buf[i].phys);
+		}
+
+		memset(&xpcie_epf->tx_desc_buf[i], 0,
+		       sizeof(struct xpcie_dma_ll_desc_buf));
+		memset(&xpcie_epf->rx_desc_buf[i], 0,
+		       sizeof(struct xpcie_dma_ll_desc_buf));
+	}
+}
+
+static int intel_xpcie_ep_dma_alloc_ll_descs_mem(struct xpcie_epf *xpcie_epf)
+{
+	struct device *dma_dev = xpcie_epf->epf->epc->dev.parent;
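+	/* Each ring gets one extra element for the terminating link descriptor. */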
+	int tx_num = XPCIE_NUM_TX_DESCS + 1;
+	int rx_num = XPCIE_NUM_RX_DESCS + 1;
+	size_t tx_size, rx_size;
+	int i;
+
+	tx_size = tx_num * sizeof(struct xpcie_dma_ll_desc);
+	rx_size = rx_num * sizeof(struct xpcie_dma_ll_desc);
+
+	for (i = 0; i < DMA_CHAN_NUM; i++) {
+		xpcie_epf->tx_desc_buf[i].virt =
+			dma_alloc_coherent(dma_dev, tx_size,
+					   &xpcie_epf->tx_desc_buf[i].phys,
+					   GFP_KERNEL);
+		xpcie_epf->rx_desc_buf[i].virt =
+			dma_alloc_coherent(dma_dev, rx_size,
+					   &xpcie_epf->rx_desc_buf[i].phys,
+					   GFP_KERNEL);
+
+		if (!xpcie_epf->tx_desc_buf[i].virt ||
+		    !xpcie_epf->rx_desc_buf[i].virt) {
+			intel_xpcie_ep_dma_free_ll_descs_mem(xpcie_epf);
+			return -ENOMEM;
+		}
+
+		xpcie_epf->tx_desc_buf[i].size = tx_size;
+		xpcie_epf->rx_desc_buf[i].size = rx_size;
+	}
+	return 0;
+}
+
+int intel_xpcie_ep_dma_reset(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+
+	/* Disable the DMA read/write engine. */
+	if (intel_xpcie_ep_dma_disable(xpcie_epf->dma_base, WRITE_ENGINE) ||
+	    intel_xpcie_ep_dma_disable(xpcie_epf->dma_base, READ_ENGINE))
+		return -EBUSY;
+
+	intel_xpcie_ep_dma_enable(xpcie_epf->dma_base, WRITE_ENGINE);
+	intel_xpcie_ep_dma_enable(xpcie_epf->dma_base, READ_ENGINE);
+
+	return 0;
+}
+
+int intel_xpcie_ep_dma_uninit(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+
+	if (intel_xpcie_ep_dma_disable(xpcie_epf->dma_base, WRITE_ENGINE) ||
+	    intel_xpcie_ep_dma_disable(xpcie_epf->dma_base, READ_ENGINE))
+		return -EBUSY;
+
+	intel_xpcie_ep_dma_free_ll_descs_mem(xpcie_epf);
+
+	return 0;
+}
+
+int intel_xpcie_ep_dma_init(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	int rc;
+
+	xpcie_epf->dma_base = intel_xpcie_ep_get_dma_base(epf);
+
+	rc = intel_xpcie_ep_dma_alloc_ll_descs_mem(xpcie_epf);
+	if (rc)
+		return rc;
+
+	return intel_xpcie_ep_dma_reset(epf);
+}
diff --git a/drivers/misc/xlink-pcie/local_host/epf.c b/drivers/misc/xlink-pcie/local_host/epf.c
index 0234756e89ae..7019aecd6a81 100644
--- a/drivers/misc/xlink-pcie/local_host/epf.c
+++ b/drivers/misc/xlink-pcie/local_host/epf.c
@@ -43,6 +43,8 @@ static irqreturn_t intel_xpcie_err_interrupt(int irq, void *args)
 
 	xpcie_epf = container_of(xpcie, struct xpcie_epf, xpcie);
 	val = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_ERR_INTR_FLAGS);
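+	/* The host requested a link reset; re-arm the DMA engines. */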
+	if (val & LINK_REQ_RST_FLG)
+		intel_xpcie_ep_dma_reset(xpcie_epf->epf);
 
 	iowrite32(val, xpcie_epf->apb_base + PCIE_REGS_PCIE_ERR_INTR_FLAGS);
 
@@ -285,8 +287,17 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
 		goto err_cleanup_bars;
 	}
 
+	ret = intel_xpcie_ep_dma_init(epf);
+	if (ret) {
+		dev_err(&epf->dev, "DMA initialization failed\n");
+		goto err_free_err_irq;
+	}
+
 	return 0;
 
+err_free_err_irq:
+	free_irq(xpcie_epf->irq_err, &xpcie_epf->xpcie);
+
 err_cleanup_bars:
 	intel_xpcie_cleanup_bars(epf);
 
@@ -295,11 +306,9 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
 
 static void intel_xpcie_epf_unbind(struct pci_epf *epf)
 {
-	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
 	struct pci_epc *epc = epf->epc;
 
-	free_irq(xpcie_epf->irq, &xpcie_epf->xpcie);
-	free_irq(xpcie_epf->irq_err, &xpcie_epf->xpcie);
+	intel_xpcie_ep_dma_uninit(epf);
 
 	pci_epc_stop(epc);
 
diff --git a/drivers/misc/xlink-pcie/local_host/epf.h b/drivers/misc/xlink-pcie/local_host/epf.h
index a60cd43fe555..82410404d54e 100644
--- a/drivers/misc/xlink-pcie/local_host/epf.h
+++ b/drivers/misc/xlink-pcie/local_host/epf.h
@@ -18,6 +18,38 @@
 
 #define KEEMBAY_XPCIE_STEPPING_MAXLEN 8
 
+#define DMA_CHAN_NUM		(4)
+
+#define XPCIE_NUM_TX_DESCS	(64)
+#define XPCIE_NUM_RX_DESCS	(64)
+
+extern bool dma_ll_mode;
+
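+/* One DWC eDMA linked-list element, as read by the hardware. */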
+struct xpcie_dma_ll_desc {
+	u32 dma_ch_control1;
+	u32 dma_transfer_size;
+	union {
+		struct {
+			u32 dma_sar_low;
+			u32 dma_sar_high;
+		};
+		phys_addr_t src_addr;
+	};
+	union {
+		struct {
+			u32 dma_dar_low;
+			u32 dma_dar_high;
+		};
+		phys_addr_t dst_addr;
+	};
+} __packed;
+
+struct xpcie_dma_ll_desc_buf {
+	struct xpcie_dma_ll_desc *virt;
+	dma_addr_t phys;
+	size_t size;
+};
+
 struct xpcie_epf {
 	struct pci_epf *epf;
 	void *vaddr[BAR_5 + 1];
@@ -32,6 +64,15 @@ struct xpcie_epf {
 	void __iomem *dma_base;
 	void __iomem *dbi_base;
 	char stepping[KEEMBAY_XPCIE_STEPPING_MAXLEN];
+
+	struct xpcie_dma_ll_desc_buf	tx_desc_buf[DMA_CHAN_NUM];
+	struct xpcie_dma_ll_desc_buf	rx_desc_buf[DMA_CHAN_NUM];
 };
 
+int intel_xpcie_ep_dma_init(struct pci_epf *epf);
+int intel_xpcie_ep_dma_uninit(struct pci_epf *epf);
+int intel_xpcie_ep_dma_reset(struct pci_epf *epf);
+int intel_xpcie_ep_dma_read_ll(struct pci_epf *epf, int chan, int descs_num);
+int intel_xpcie_ep_dma_write_ll(struct pci_epf *epf, int chan, int descs_num);
+
 #endif /* XPCIE_EPF_HEADER_ */
-- 
2.17.1

