From: mgross@linux.intel.com
To: markgross@kernel.org, mgross@linux.intel.com, arnd@arndb.de,
	bp@suse.de, damien.lemoal@wdc.com, dragan.cvetic@xilinx.com,
	gregkh@linuxfoundation.org, corbet@lwn.net,
	palmerdabbelt@google.com, paul.walmsley@sifive.com,
	peng.fan@nxp.com, robh+dt@kernel.org, shawnguo@kernel.org,
	jassisinghbrar@gmail.com
Cc: linux-kernel@vger.kernel.org,
	Srikanth Thokala <srikanth.thokala@intel.com>,
	Derek Kiernan <derek.kiernan@xilinx.com>
Subject: [PATCH v4 09/34] misc: xlink-pcie: lh: Add PCIe EPF driver for Local Host
Date: Fri, 29 Jan 2021 18:20:59 -0800
Message-ID: <20210130022124.65083-45-mgross@linux.intel.com>
In-Reply-To: <20210130022124.65083-1-mgross@linux.intel.com>

From: Srikanth Thokala <srikanth.thokala@intel.com>

Add the PCIe EPF driver for the local host (lh) to configure BARs and
other HW resources. The underlying PCIe HW controller is a Synopsys
DesignWare (DWC) PCIe core.
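
For readers less familiar with the Linux PCI endpoint framework, a minimal
sketch of the generic EPF driver shape this patch follows is shown below
(illustrative only and condensed; the demo_* names are placeholders, not
part of the driver added here):

#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* ->bind(): back a BAR with local memory and program it into the EPC */
static int demo_epf_bind(struct pci_epf *epf)
{
	struct pci_epf_bar *bar = &epf->bar[BAR_2];
	void *vaddr;

	bar->size = SZ_16K;
	vaddr = pci_epf_alloc_space(epf, bar->size, BAR_2, SZ_16K);
	if (!vaddr)
		return -ENOMEM;

	return pci_epc_set_bar(epf->epc, epf->func_no, bar);
}

/* ->unbind(): tear down what bind set up */
static void demo_epf_unbind(struct pci_epf *epf)
{
	pci_epc_clear_bar(epf->epc, epf->func_no, &epf->bar[BAR_2]);
	pci_epf_free_space(epf, epf->bar[BAR_2].addr, BAR_2);
}

static struct pci_epf_ops demo_ops = {
	.bind	= demo_epf_bind,
	.unbind	= demo_epf_unbind,
};

static const struct pci_epf_device_id demo_ids[] = {
	{ .name = "demo_epf" },
	{},
};

static int demo_epf_probe(struct pci_epf *epf)
{
	return 0;	/* per-function state would be allocated here */
}

static struct pci_epf_driver demo_epf_driver = {
	.driver.name	= "demo_epf",
	.probe		= demo_epf_probe,
	.id_table	= demo_ids,
	.ops		= &demo_ops,
	.owner		= THIS_MODULE,
};
/* registered from module_init() via pci_epf_register_driver() and
 * removed from module_exit() via pci_epf_unregister_driver()
 */

The driver below follows the same pattern, additionally handling the Keem
Bay specifics: silicon stepping detection from the device tree, mapping of
the APB/DBI register spaces of the DWC controller, and the host/error
interrupt lines. As with other endpoint function drivers, an instance can
be created and started through the generic pci_ep configfs interface once
the mxlk_ep module is loaded.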

Cc: Derek Kiernan <derek.kiernan@xilinx.com>
Cc: Dragan Cvetic <dragan.cvetic@xilinx.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Srikanth Thokala <srikanth.thokala@intel.com>
---
 MAINTAINERS                                 |   6 +
 drivers/misc/Kconfig                        |   1 +
 drivers/misc/Makefile                       |   1 +
 drivers/misc/xlink-pcie/Kconfig             |   9 +
 drivers/misc/xlink-pcie/Makefile            |   1 +
 drivers/misc/xlink-pcie/local_host/Makefile |   2 +
 drivers/misc/xlink-pcie/local_host/epf.c    | 373 ++++++++++++++++++++
 drivers/misc/xlink-pcie/local_host/epf.h    |  37 ++
 drivers/misc/xlink-pcie/local_host/xpcie.h  |  38 ++
 9 files changed, 468 insertions(+)
 create mode 100644 drivers/misc/xlink-pcie/Kconfig
 create mode 100644 drivers/misc/xlink-pcie/Makefile
 create mode 100644 drivers/misc/xlink-pcie/local_host/Makefile
 create mode 100644 drivers/misc/xlink-pcie/local_host/epf.c
 create mode 100644 drivers/misc/xlink-pcie/local_host/epf.h
 create mode 100644 drivers/misc/xlink-pcie/local_host/xpcie.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 6742a1827cd9..3ca6c8c6341b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1961,6 +1961,12 @@ F:	Documentation/devicetree/bindings/arm/intel,keembay.yaml
 F:	arch/arm64/boot/dts/intel/keembay-evm.dts
 F:	arch/arm64/boot/dts/intel/keembay-soc.dtsi
 
+ARM KEEM BAY XLINK PCIE SUPPORT
+M:	Srikanth Thokala <srikanth.thokala@intel.com>
+M:	Mark Gross <mgross@linux.intel.com>
+S:	Supported
+F:	drivers/misc/xlink-pcie/
+
 ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
 M:	Jonathan Cameron <jic23@cam.ac.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0d8099..dfb98e444c6e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,4 +481,5 @@ source "drivers/misc/ocxl/Kconfig"
 source "drivers/misc/cardreader/Kconfig"
 source "drivers/misc/habanalabs/Kconfig"
 source "drivers/misc/uacce/Kconfig"
+source "drivers/misc/xlink-pcie/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e73330..d17621fc43d5 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_HABANA_AI)		+= habanalabs/
 obj-$(CONFIG_UACCE)		+= uacce/
 obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
 obj-$(CONFIG_HISI_HIKEY_USB)	+= hisi_hikey_usb.o
+obj-y                           += xlink-pcie/
diff --git a/drivers/misc/xlink-pcie/Kconfig b/drivers/misc/xlink-pcie/Kconfig
new file mode 100644
index 000000000000..46aa401d79b7
--- /dev/null
+++ b/drivers/misc/xlink-pcie/Kconfig
@@ -0,0 +1,9 @@
+config XLINK_PCIE_LH_DRIVER
+	tristate "XLink PCIe Local Host driver"
+	depends on PCI_ENDPOINT && ARCH_KEEMBAY
+	help
+	  This option enables the XLink PCIe Local Host driver.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called mxlk_ep. The driver is used for XLink communication over
+	  PCIe and is meant to be loaded on the Intel Keem Bay platform.
diff --git a/drivers/misc/xlink-pcie/Makefile b/drivers/misc/xlink-pcie/Makefile
new file mode 100644
index 000000000000..d693d382e9c6
--- /dev/null
+++ b/drivers/misc/xlink-pcie/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLINK_PCIE_LH_DRIVER) += local_host/
diff --git a/drivers/misc/xlink-pcie/local_host/Makefile b/drivers/misc/xlink-pcie/local_host/Makefile
new file mode 100644
index 000000000000..514d3f0c91bc
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_XLINK_PCIE_LH_DRIVER) += mxlk_ep.o
+mxlk_ep-objs := epf.o
diff --git a/drivers/misc/xlink-pcie/local_host/epf.c b/drivers/misc/xlink-pcie/local_host/epf.c
new file mode 100644
index 000000000000..0234756e89ae
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/epf.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "epf.h"
+
+#define BAR2_MIN_SIZE			SZ_16K
+#define BAR4_MIN_SIZE			SZ_16K
+
+#define PCIE_REGS_PCIE_INTR_ENABLE	0x18
+#define PCIE_REGS_PCIE_INTR_FLAGS	0x1C
+#define LBC_CII_EVENT_FLAG		BIT(18)
+#define PCIE_REGS_PCIE_ERR_INTR_FLAGS	0x24
+#define LINK_REQ_RST_FLG		BIT(15)
+
+static struct pci_epf_header xpcie_header = {
+	.vendorid = PCI_VENDOR_ID_INTEL,
+	.deviceid = PCI_DEVICE_ID_INTEL_KEEMBAY,
+	.baseclass_code = PCI_BASE_CLASS_MULTIMEDIA,
+	.subclass_code = 0x0,
+	.subsys_vendor_id = 0x0,
+	.subsys_id = 0x0,
+};
+
+static const struct pci_epf_device_id xpcie_epf_ids[] = {
+	{
+		.name = "mxlk_pcie_epf",
+	},
+	{},
+};
+
+static irqreturn_t intel_xpcie_err_interrupt(int irq, void *args)
+{
+	struct xpcie_epf *xpcie_epf;
+	struct xpcie *xpcie = args;
+	u32 val;
+
+	xpcie_epf = container_of(xpcie, struct xpcie_epf, xpcie);
+	val = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_ERR_INTR_FLAGS);
+
+	iowrite32(val, xpcie_epf->apb_base + PCIE_REGS_PCIE_ERR_INTR_FLAGS);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t intel_xpcie_host_interrupt(int irq, void *args)
+{
+	struct xpcie_epf *xpcie_epf;
+	struct xpcie *xpcie = args;
+	u32 val;
+
+	xpcie_epf = container_of(xpcie, struct xpcie_epf, xpcie);
+	val = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_INTR_FLAGS);
+	if (val & LBC_CII_EVENT_FLAG) {
+		iowrite32(LBC_CII_EVENT_FLAG,
+			  xpcie_epf->apb_base + PCIE_REGS_PCIE_INTR_FLAGS);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void
+intel_xpcie_configure_bar(struct pci_epf *epf,
+			  const struct pci_epc_features *epc_features)
+{
+	struct pci_epf_bar *epf_bar;
+	bool bar_fixed_64bit;
+	int i;
+
+	for (i = BAR_0; i <= BAR_5; i++) {
+		epf_bar = &epf->bar[i];
+		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
+		if (bar_fixed_64bit)
+			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+		if (epc_features->bar_fixed_size[i])
+			epf_bar->size = epc_features->bar_fixed_size[i];
+	}
+}
+
+static void intel_xpcie_cleanup_bar(struct pci_epf *epf, enum pci_barno barno)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	struct pci_epc *epc = epf->epc;
+
+	if (xpcie_epf->vaddr[barno]) {
+		pci_epc_clear_bar(epc, epf->func_no, &epf->bar[barno]);
+		pci_epf_free_space(epf, xpcie_epf->vaddr[barno], barno);
+		xpcie_epf->vaddr[barno] = NULL;
+	}
+}
+
+static void intel_xpcie_cleanup_bars(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+
+	intel_xpcie_cleanup_bar(epf, BAR_2);
+	intel_xpcie_cleanup_bar(epf, BAR_4);
+	xpcie_epf->xpcie.mmio = NULL;
+	xpcie_epf->xpcie.bar4 = NULL;
+}
+
+static int intel_xpcie_setup_bar(struct pci_epf *epf, enum pci_barno barno,
+				 size_t min_size, size_t align)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	struct pci_epf_bar *bar = &epf->bar[barno];
+	struct pci_epc *epc = epf->epc;
+	void *vaddr;
+	int ret;
+
+	bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+	if (!bar->size)
+		bar->size = min_size;
+
+	if (barno == BAR_4)
+		bar->flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+	vaddr = pci_epf_alloc_space(epf, bar->size, barno, align);
+	if (!vaddr) {
+		dev_err(&epf->dev, "Failed to map BAR%d\n", barno);
+		return -ENOMEM;
+	}
+
+	ret = pci_epc_set_bar(epc, epf->func_no, bar);
+	if (ret) {
+		pci_epf_free_space(epf, vaddr, barno);
+		dev_err(&epf->dev, "Failed to set BAR%d\n", barno);
+		return ret;
+	}
+
+	xpcie_epf->vaddr[barno] = vaddr;
+
+	return 0;
+}
+
+static int intel_xpcie_setup_bars(struct pci_epf *epf, size_t align)
+{
+	int ret;
+
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+
+	ret = intel_xpcie_setup_bar(epf, BAR_2, BAR2_MIN_SIZE, align);
+	if (ret)
+		return ret;
+
+	ret = intel_xpcie_setup_bar(epf, BAR_4, BAR4_MIN_SIZE, align);
+	if (ret) {
+		intel_xpcie_cleanup_bar(epf, BAR_2);
+		return ret;
+	}
+
+	xpcie_epf->comm_bar = BAR_2;
+	xpcie_epf->xpcie.mmio = (void *)xpcie_epf->vaddr[BAR_2] +
+				XPCIE_MMIO_OFFSET;
+
+	xpcie_epf->bar4 = BAR_4;
+	xpcie_epf->xpcie.bar4 = xpcie_epf->vaddr[BAR_4];
+
+	return 0;
+}
+
+static int intel_xpcie_epf_get_platform_data(struct device *dev,
+					     struct xpcie_epf *xpcie_epf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *soc_node, *version_node;
+	struct resource *res;
+	const char *prop;
+	int prop_size;
+
+	xpcie_epf->irq_dma = platform_get_irq_byname(pdev, "intr");
+	if (xpcie_epf->irq_dma < 0) {
+		dev_err(&xpcie_epf->epf->dev, "failed to get IRQ: %d\n",
+			xpcie_epf->irq_dma);
+		return -EINVAL;
+	}
+
+	xpcie_epf->irq_err = platform_get_irq_byname(pdev, "err_intr");
+	if (xpcie_epf->irq_err < 0) {
+		dev_err(&xpcie_epf->epf->dev, "failed to get error IRQ: %d\n",
+			xpcie_epf->irq_err);
+		return -EINVAL;
+	}
+
+	xpcie_epf->irq = platform_get_irq_byname(pdev, "ev_intr");
+	if (xpcie_epf->irq < 0) {
+		dev_err(&xpcie_epf->epf->dev, "failed to get event IRQ: %d\n",
+			xpcie_epf->irq);
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+	xpcie_epf->apb_base =
+		devm_ioremap(dev, res->start, resource_size(res));
+	if (!xpcie_epf->apb_base)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	xpcie_epf->dbi_base =
+		devm_ioremap(dev, res->start, resource_size(res));
+	if (!xpcie_epf->dbi_base)
+		return -ENOMEM;
+
+	memcpy(xpcie_epf->stepping, "B0", 2);
+	soc_node = of_get_parent(pdev->dev.of_node);
+	if (soc_node) {
+		version_node = of_get_child_by_name(soc_node, "version-info");
+		if (version_node) {
+			prop = of_get_property(version_node, "stepping",
+					       &prop_size);
+			if (prop && prop_size <= KEEMBAY_XPCIE_STEPPING_MAXLEN)
+				memcpy(xpcie_epf->stepping, prop, prop_size);
+			of_node_put(version_node);
+		}
+		of_node_put(soc_node);
+	}
+
+	return 0;
+}
+
+static int intel_xpcie_epf_bind(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	const struct pci_epc_features *features;
+	struct pci_epc *epc = epf->epc;
+	struct device *dev;
+	size_t align = SZ_16K;
+	int ret;
+
+	if (WARN_ON_ONCE(!epc))
+		return -EINVAL;
+
+	dev = epc->dev.parent;
+	features = pci_epc_get_features(epc, epf->func_no);
+	xpcie_epf->epc_features = features;
+	if (features) {
+		align = features->align;
+		intel_xpcie_configure_bar(epf, features);
+	}
+
+	ret = intel_xpcie_setup_bars(epf, align);
+	if (ret) {
+		dev_err(&epf->dev, "BAR initialization failed\n");
+		return ret;
+	}
+
+	ret = intel_xpcie_epf_get_platform_data(dev, xpcie_epf);
+	if (ret) {
+		dev_err(&epf->dev, "Unable to get platform data\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(xpcie_epf->stepping, "A0")) {
+		xpcie_epf->xpcie.legacy_a0 = true;
+		iowrite32(1, (void __iomem *)xpcie_epf->xpcie.mmio +
+			     XPCIE_MMIO_LEGACY_A0);
+	} else {
+		xpcie_epf->xpcie.legacy_a0 = false;
+		iowrite32(0, (void __iomem *)xpcie_epf->xpcie.mmio +
+			     XPCIE_MMIO_LEGACY_A0);
+	}
+
+	/* Enable interrupt */
+	writel(LBC_CII_EVENT_FLAG,
+	       xpcie_epf->apb_base + PCIE_REGS_PCIE_INTR_ENABLE);
+	ret = devm_request_irq(&epf->dev, xpcie_epf->irq,
+			       &intel_xpcie_host_interrupt, 0,
+			       XPCIE_DRIVER_NAME, &xpcie_epf->xpcie);
+	if (ret) {
+		dev_err(&epf->dev, "failed to request irq\n");
+		goto err_cleanup_bars;
+	}
+
+	ret = devm_request_irq(&epf->dev, xpcie_epf->irq_err,
+			       &intel_xpcie_err_interrupt, 0,
+			       XPCIE_DRIVER_NAME, &xpcie_epf->xpcie);
+	if (ret) {
+		dev_err(&epf->dev, "failed to request error irq\n");
+		goto err_cleanup_bars;
+	}
+
+	return 0;
+
+err_cleanup_bars:
+	intel_xpcie_cleanup_bars(epf);
+
+	return ret;
+}
+
+static void intel_xpcie_epf_unbind(struct pci_epf *epf)
+{
+	struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
+	struct pci_epc *epc = epf->epc;
+
+	devm_free_irq(&epf->dev, xpcie_epf->irq, &xpcie_epf->xpcie);
+	devm_free_irq(&epf->dev, xpcie_epf->irq_err, &xpcie_epf->xpcie);
+
+	pci_epc_stop(epc);
+
+	intel_xpcie_cleanup_bars(epf);
+}
+
+static int intel_xpcie_epf_probe(struct pci_epf *epf)
+{
+	struct device *dev = &epf->dev;
+	struct xpcie_epf *xpcie_epf;
+
+	xpcie_epf = devm_kzalloc(dev, sizeof(*xpcie_epf), GFP_KERNEL);
+	if (!xpcie_epf)
+		return -ENOMEM;
+
+	epf->header = &xpcie_header;
+	xpcie_epf->epf = epf;
+	epf_set_drvdata(epf, xpcie_epf);
+
+	return 0;
+}
+
+static void intel_xpcie_epf_shutdown(struct device *dev)
+{
+	struct pci_epf *epf = to_pci_epf(dev);
+	struct xpcie_epf *xpcie_epf;
+
+	xpcie_epf = epf_get_drvdata(epf);
+
+	/* Notify the host in case PCIe hotplug is not supported */
+	if (xpcie_epf)
+		pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
+}
+
+static struct pci_epf_ops ops = {
+	.bind = intel_xpcie_epf_bind,
+	.unbind = intel_xpcie_epf_unbind,
+};
+
+static struct pci_epf_driver xpcie_epf_driver = {
+	.driver.name = "mxlk_pcie_epf",
+	.driver.shutdown = intel_xpcie_epf_shutdown,
+	.probe = intel_xpcie_epf_probe,
+	.id_table = xpcie_epf_ids,
+	.ops = &ops,
+	.owner = THIS_MODULE,
+};
+
+static int __init intel_xpcie_epf_init(void)
+{
+	int ret;
+
+	ret = pci_epf_register_driver(&xpcie_epf_driver);
+	if (ret) {
+		pr_err("Failed to register xlink pcie epf driver: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(intel_xpcie_epf_init);
+
+static void __exit intel_xpcie_epf_exit(void)
+{
+	pci_epf_unregister_driver(&xpcie_epf_driver);
+}
+module_exit(intel_xpcie_epf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION(XPCIE_DRIVER_DESC);
diff --git a/drivers/misc/xlink-pcie/local_host/epf.h b/drivers/misc/xlink-pcie/local_host/epf.h
new file mode 100644
index 000000000000..a60cd43fe555
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/epf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef XPCIE_EPF_HEADER_
+#define XPCIE_EPF_HEADER_
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#include "xpcie.h"
+
+#define XPCIE_DRIVER_NAME "mxlk_pcie_epf"
+#define XPCIE_DRIVER_DESC "Intel(R) xLink PCIe endpoint function driver"
+
+#define KEEMBAY_XPCIE_STEPPING_MAXLEN 8
+
+struct xpcie_epf {
+	struct pci_epf *epf;
+	void *vaddr[BAR_5 + 1];
+	enum pci_barno comm_bar;
+	enum pci_barno bar4;
+	const struct pci_epc_features *epc_features;
+	struct xpcie xpcie;
+	int irq;
+	int irq_dma;
+	int irq_err;
+	void __iomem *apb_base;
+	void __iomem *dma_base;
+	void __iomem *dbi_base;
+	char stepping[KEEMBAY_XPCIE_STEPPING_MAXLEN];
+};
+
+#endif /* XPCIE_EPF_HEADER_ */
diff --git a/drivers/misc/xlink-pcie/local_host/xpcie.h b/drivers/misc/xlink-pcie/local_host/xpcie.h
new file mode 100644
index 000000000000..0745e6dfee10
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/xpcie.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*****************************************************************************
+ *
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ ****************************************************************************/
+
+#ifndef XPCIE_HEADER_
+#define XPCIE_HEADER_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+
+#ifndef PCI_DEVICE_ID_INTEL_KEEMBAY
+#define PCI_DEVICE_ID_INTEL_KEEMBAY 0x6240
+#endif
+
+#define XPCIE_IO_COMM_SIZE SZ_16K
+#define XPCIE_MMIO_OFFSET SZ_4K
+
+/* MMIO layout and offsets shared between device and host */
+struct xpcie_mmio {
+	u8 legacy_a0;
+} __packed;
+
+#define XPCIE_MMIO_LEGACY_A0	(offsetof(struct xpcie_mmio, legacy_a0))
+
+struct xpcie {
+	u32 status;
+	bool legacy_a0;
+	void *mmio;
+	void *bar4;
+};
+
+#endif /* XPCIE_HEADER_ */
-- 
2.17.1



Thread overview: 82+ messages
2021-01-30  2:20 [PATCH v3 00/34] Intel Vision Processing base enabling mgross
2021-01-30  2:20 ` [PATCH v3 01/34] Add Vision Processing Unit (VPU) documentation mgross
2021-01-30  2:20 ` [PATCH v3 02/34] dt-bindings: mailbox: Add Intel VPU IPC mailbox bindings mgross
2021-01-30  2:20 ` [PATCH v3 03/34] mailbox: vpu-ipc-mailbox: Add support for Intel VPU IPC mailbox mgross
2021-02-01  7:07   ` Jassi Brar
2021-02-01 15:49     ` Alessandrelli, Daniele
2021-01-30  2:20 ` [PATCH v3 04/34] dt-bindings: Add bindings for Keem Bay IPC driver mgross
2021-01-30  2:20 ` [PATCH v3 05/34] keembay-ipc: Add Keem Bay IPC module mgross
2021-01-30  2:20 ` [PATCH v3 06/34] dt-bindings: Add bindings for Keem Bay VPU IPC driver mgross
2021-01-30  2:20 ` [PATCH v3 07/34] keembay-vpu-ipc: Add Keem Bay VPU IPC module mgross
2021-01-30  2:20 ` [PATCH v3 08/34] misc: xlink-pcie: Add documentation for XLink PCIe driver mgross
2021-01-30  2:20 ` [PATCH v3 09/34] misc: xlink-pcie: lh: Add PCIe EPF driver for Local Host mgross
2021-01-30  2:20 ` [PATCH v3 10/34] misc: xlink-pcie: lh: Add PCIe EP DMA functionality mgross
2021-01-30  2:20 ` [PATCH v3 11/34] misc: xlink-pcie: lh: Add core communication logic mgross
2021-01-30  2:20 ` [PATCH v3 12/34] misc: xlink-pcie: lh: Prepare changes for adding remote host driver mgross
2021-01-30  2:20 ` [PATCH v3 13/34] misc: xlink-pcie: rh: Add PCIe EP driver for Remote Host mgross
2021-01-30  2:20 ` [PATCH v3 14/34] misc: xlink-pcie: rh: Add core communication logic mgross
2021-01-30  2:20 ` [PATCH v3 15/34] misc: xlink-pcie: Add XLink API interface mgross
2021-01-30  2:20 ` [PATCH v3 16/34] misc: xlink-pcie: Add asynchronous event notification support for XLink mgross
2021-01-30  2:20 ` [PATCH v3 17/34] xlink-ipc: Add xlink ipc device tree bindings mgross
2021-01-30  2:20 ` [PATCH v3 18/34] xlink-ipc: Add xlink ipc driver mgross
2021-01-30  2:20 ` [PATCH v3 19/34] xlink-core: Add xlink core device tree bindings mgross
2021-01-30  2:20 ` [PATCH v3 20/34] xlink-core: Add xlink core driver xLink mgross
2021-01-30  2:20 ` [PATCH v3 21/34] xlink-core: Enable xlink protocol over pcie mgross
2021-01-30  2:20 ` [PATCH v3 22/34] xlink-core: Enable VPU IP management and runtime control mgross
2021-01-30  2:20 ` [PATCH v3 23/34] xlink-core: add async channel and events mgross
2021-01-30  2:20 ` [PATCH v3 24/34] dt-bindings: misc: Add Keem Bay vpumgr mgross
2021-01-30  2:20 ` [PATCH v3 25/34] misc: Add Keem Bay VPU manager mgross
2021-02-01  2:04   ` Randy Dunlap
2021-01-30  2:20 ` [PATCH v3 26/34] dt-bindings: misc: intel_tsens: Add tsens thermal bindings documentation mgross
2021-01-30  2:20 ` [PATCH v3 27/34] misc: Tsens ARM host thermal driver mgross
2021-01-30  2:20 ` [PATCH v3 28/34] misc: Intel tsens IA host driver mgross
2021-01-30  7:01   ` Joe Perches
2021-02-02  7:21     ` C, Udhayakumar
2021-01-30  2:20 ` [PATCH v3 29/34] Intel tsens i2c slave driver mgross
2021-02-01  2:07   ` Randy Dunlap
2021-01-30  2:20 ` [PATCH v3 30/34] misc:intel_tsens: Intel Keem Bay tsens driver mgross
2021-01-30  2:20 ` [PATCH v3 31/34] Intel Keem Bay XLink SMBus driver mgross
2021-01-30  2:20 ` [PATCH v3 32/34] dt-bindings: misc: hddl_dev: Add hddl device management documentation mgross
2021-01-30  2:20 ` [PATCH v3 33/34] misc: Hddl device management for local host mgross
2021-01-30  2:20 ` [PATCH v3 34/34] misc: HDDL device management for IA host mgross
2021-01-30  2:20 ` [PATCH v4 00/34] Intel Vision Processing base enabling mgross
2021-01-30  2:20 ` [PATCH v4 01/34] Add Vision Processing Unit (VPU) documentation mgross
2021-01-30  2:20 ` [PATCH v4 02/34] dt-bindings: mailbox: Add Intel VPU IPC mailbox bindings mgross
2021-01-30  2:20 ` [PATCH v4 03/34] mailbox: vpu-ipc-mailbox: Add support for Intel VPU IPC mailbox mgross
2021-01-30  2:20 ` [PATCH v4 04/34] dt-bindings: Add bindings for Keem Bay IPC driver mgross
2021-01-30  2:20 ` [PATCH v4 05/34] keembay-ipc: Add Keem Bay IPC module mgross
2021-01-30  2:20 ` [PATCH v4 06/34] dt-bindings: Add bindings for Keem Bay VPU IPC driver mgross
2021-01-30  2:20 ` [PATCH v4 07/34] keembay-vpu-ipc: Add Keem Bay VPU IPC module mgross
2021-01-30  2:20 ` [PATCH v4 08/34] misc: xlink-pcie: Add documentation for XLink PCIe driver mgross
2021-01-30  2:20 ` mgross [this message]
2021-01-30  2:21 ` [PATCH v4 10/34] misc: xlink-pcie: lh: Add PCIe EP DMA functionality mgross
2021-01-30  2:21 ` [PATCH v4 11/34] misc: xlink-pcie: lh: Add core communication logic mgross
2021-01-30  2:21 ` [PATCH v4 12/34] misc: xlink-pcie: lh: Prepare changes for adding remote host driver mgross
2021-01-30  2:21 ` [PATCH v4 13/34] misc: xlink-pcie: rh: Add PCIe EP driver for Remote Host mgross
2021-01-30  2:21 ` [PATCH v4 14/34] misc: xlink-pcie: rh: Add core communication logic mgross
2021-01-30  2:21 ` [PATCH v4 15/34] misc: xlink-pcie: Add XLink API interface mgross
2021-01-30  2:21 ` [PATCH v4 16/34] misc: xlink-pcie: Add asynchronous event notification support for XLink mgross
2021-01-30  2:21 ` [PATCH v4 17/34] xlink-ipc: Add xlink ipc device tree bindings mgross
2021-01-30  2:21 ` [PATCH v4 18/34] xlink-ipc: Add xlink ipc driver mgross
2021-01-30  2:21 ` [PATCH v4 19/34] xlink-core: Add xlink core device tree bindings mgross
2021-01-30  2:21 ` [PATCH v4 20/34] xlink-core: Add xlink core driver xLink mgross
2021-01-30  2:21 ` [PATCH v4 21/34] xlink-core: Enable xlink protocol over pcie mgross
2021-01-30  2:21 ` [PATCH v4 22/34] xlink-core: Enable VPU IP management and runtime control mgross
2021-01-30  2:21 ` [PATCH v4 23/34] xlink-core: add async channel and events mgross
2021-01-30  2:21 ` [PATCH v4 24/34] dt-bindings: misc: Add Keem Bay vpumgr mgross
2021-01-30  2:21 ` [PATCH v4 25/34] misc: Add Keem Bay VPU manager mgross
2021-01-30  2:21 ` [PATCH v4 26/34] dt-bindings: misc: intel_tsens: Add tsens thermal bindings documentation mgross
2021-01-30 17:23   ` Rob Herring
2021-02-02  8:47     ` C, Udhayakumar
2021-01-30  2:21 ` [PATCH v4 27/34] misc: Tsens ARM host thermal driver mgross
2021-01-30  2:21 ` [PATCH v4 28/34] misc: Intel tsens IA host driver mgross
2021-01-30  2:21 ` [PATCH v4 29/34] Intel tsens i2c slave driver mgross
2021-02-01  2:13   ` Randy Dunlap
2021-02-01 15:20     ` Gross, Mark
2021-01-30  2:21 ` [PATCH v4 30/34] misc:intel_tsens: Intel Keem Bay tsens driver mgross
2021-01-30  2:21 ` [PATCH v4 31/34] Intel Keem Bay XLink SMBus driver mgross
2021-01-30  2:21 ` [PATCH v4 32/34] dt-bindings: misc: hddl_dev: Add hddl device management documentation mgross
2021-01-30 17:23   ` Rob Herring
2021-02-02  8:48     ` C, Udhayakumar
2021-01-30  2:21 ` [PATCH v4 33/34] misc: Hddl device management for local host mgross
2021-01-30  2:21 ` [PATCH v4 34/34] misc: HDDL device management for IA host mgross
