From: mgross@linux.intel.com
To: markgross@kernel.org, mgross@linux.intel.com, arnd@arndb.de,
	bp@suse.de, damien.lemoal@wdc.com, dragan.cvetic@xilinx.com,
	gregkh@linuxfoundation.org, corbet@lwn.net,
	palmerdabbelt@google.com, paul.walmsley@sifive.com,
	peng.fan@nxp.com, robh+dt@kernel.org, shawnguo@kernel.org,
	jassisinghbrar@gmail.com
Cc: linux-kernel@vger.kernel.org,
	Srikanth Thokala <srikanth.thokala@intel.com>
Subject: [PATCH v3 13/34] misc: xlink-pcie: rh: Add PCIe EP driver for Remote Host
Date: Fri, 29 Jan 2021 18:20:28 -0800
Message-ID: <20210130022124.65083-14-mgross@linux.intel.com>
In-Reply-To: <20210130022124.65083-1-mgross@linux.intel.com>

From: Srikanth Thokala <srikanth.thokala@intel.com>

Add the PCIe endpoint device driver for the Remote Host. It configures
the endpoint's PCIe BARs and MSIs: BARs 0, 2 and 4 are mapped, a single
MSI vector is allocated, and the device status region is polled until
the Keem Bay device reports that it is running.

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Mark Gross <mgross@linux.intel.com>
Signed-off-by: Srikanth Thokala <srikanth.thokala@intel.com>
---
 MAINTAINERS                                  |   2 +-
 drivers/misc/xlink-pcie/Kconfig              |  11 +
 drivers/misc/xlink-pcie/Makefile             |   1 +
 drivers/misc/xlink-pcie/common/xpcie.h       |   1 +
 drivers/misc/xlink-pcie/remote_host/Makefile |   3 +
 drivers/misc/xlink-pcie/remote_host/main.c   |  90 ++++
 drivers/misc/xlink-pcie/remote_host/pci.c    | 449 +++++++++++++++++++
 drivers/misc/xlink-pcie/remote_host/pci.h    |  62 +++
 8 files changed, 618 insertions(+), 1 deletion(-)
 create mode 100644 drivers/misc/xlink-pcie/remote_host/Makefile
 create mode 100644 drivers/misc/xlink-pcie/remote_host/main.c
 create mode 100644 drivers/misc/xlink-pcie/remote_host/pci.c
 create mode 100644 drivers/misc/xlink-pcie/remote_host/pci.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 3ca6c8c6341b..e05fa34d72ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1961,7 +1961,7 @@ F:	Documentation/devicetree/bindings/arm/intel,keembay.yaml
 F:	arch/arm64/boot/dts/intel/keembay-evm.dts
 F:	arch/arm64/boot/dts/intel/keembay-soc.dtsi
 
-ARM KEEM BAY XLINK PCIE SUPPORT
+ARM/INTEL KEEM BAY XLINK PCIE SUPPORT
 M:	Srikanth Thokala <srikanth.thokala@intel.com>
 M:	Mark Gross <mgross@linux.intel.com>
 S:	Supported
diff --git a/drivers/misc/xlink-pcie/Kconfig b/drivers/misc/xlink-pcie/Kconfig
index 46aa401d79b7..448b9bfbdfa2 100644
--- a/drivers/misc/xlink-pcie/Kconfig
+++ b/drivers/misc/xlink-pcie/Kconfig
@@ -1,3 +1,14 @@
+config XLINK_PCIE_RH_DRIVER
+	tristate "XLink PCIe Remote Host driver"
+	depends on PCI && X86_64
+	help
+	  This option enables the XLink PCIe Remote Host driver.
+
+	  Choose M here to compile this driver as a module, named mxlk.
+	  The driver handles XLink communication over PCIe and is loaded
+	  on the IA host that is connected to the Intel Keem Bay SoC.
+
 config XLINK_PCIE_LH_DRIVER
 	tristate "XLink PCIe Local Host driver"
 	depends on PCI_ENDPOINT && ARCH_KEEMBAY
diff --git a/drivers/misc/xlink-pcie/Makefile b/drivers/misc/xlink-pcie/Makefile
index d693d382e9c6..1dd984d8d88c 100644
--- a/drivers/misc/xlink-pcie/Makefile
+++ b/drivers/misc/xlink-pcie/Makefile
@@ -1 +1,2 @@
+obj-$(CONFIG_XLINK_PCIE_RH_DRIVER) += remote_host/
 obj-$(CONFIG_XLINK_PCIE_LH_DRIVER) += local_host/
diff --git a/drivers/misc/xlink-pcie/common/xpcie.h b/drivers/misc/xlink-pcie/common/xpcie.h
index 48529eb49be0..b5cf9242a59a 100644
--- a/drivers/misc/xlink-pcie/common/xpcie.h
+++ b/drivers/misc/xlink-pcie/common/xpcie.h
@@ -69,6 +69,7 @@ struct xpcie_mmio {
 struct xpcie {
 	u32 status;
 	bool legacy_a0;
+	void *bar0;
 	void *mmio;
 	void *bar4;
 
diff --git a/drivers/misc/xlink-pcie/remote_host/Makefile b/drivers/misc/xlink-pcie/remote_host/Makefile
new file mode 100644
index 000000000000..96374a43023e
--- /dev/null
+++ b/drivers/misc/xlink-pcie/remote_host/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_XLINK_PCIE_RH_DRIVER) += mxlk.o
+mxlk-objs := main.o
+mxlk-objs += pci.o
diff --git a/drivers/misc/xlink-pcie/remote_host/main.c b/drivers/misc/xlink-pcie/remote_host/main.c
new file mode 100644
index 000000000000..ed1a431ed5d4
--- /dev/null
+++ b/drivers/misc/xlink-pcie/remote_host/main.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/module.h>
+
+#include "pci.h"
+#include "../common/core.h"
+
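+/*
+ * The 16-bit hardware ID encodes the PCI bus number in bits 15:8 and
+ * the device (slot) number in bits 7:0.
+ */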
+#define HW_ID_LO_MASK	GENMASK(7, 0)
+#define HW_ID_HI_MASK	GENMASK(15, 8)
+
+static const struct pci_device_id xpcie_pci_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KEEMBAY), 0 },
+	{ 0 }
+};
+
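+/*
+ * Derive the xlink software device ID from the PCI bus/slot, create the
+ * per-device context (or reuse one from an earlier probe of the same
+ * ID), and bring up the PCI resources.  The device is added to the
+ * global list only once initialization has succeeded.
+ */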
+static int intel_xpcie_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	bool new_device = false;
+	struct xpcie_dev *xdev;
+	u32 sw_devid;
+	u16 hw_id;
+	int ret;
+
+	hw_id = FIELD_PREP(HW_ID_HI_MASK, pdev->bus->number) |
+		FIELD_PREP(HW_ID_LO_MASK, PCI_SLOT(pdev->devfn));
+
+	sw_devid = FIELD_PREP(XLINK_DEV_INF_TYPE_MASK,
+			      XLINK_DEV_INF_PCIE) |
+		   FIELD_PREP(XLINK_DEV_PHYS_ID_MASK, hw_id) |
+		   FIELD_PREP(XLINK_DEV_TYPE_MASK, XLINK_DEV_TYPE_KMB) |
+		   FIELD_PREP(XLINK_DEV_PCIE_ID_MASK, XLINK_DEV_PCIE_0) |
+		   FIELD_PREP(XLINK_DEV_FUNC_MASK, XLINK_DEV_FUNC_VPU);
+
+	xdev = intel_xpcie_get_device_by_id(sw_devid);
+	if (!xdev) {
+		xdev = intel_xpcie_create_device(sw_devid, pdev);
+		if (!xdev)
+			return -ENOMEM;
+
+		new_device = true;
+	}
+
+	ret = intel_xpcie_pci_init(xdev, pdev);
+	if (ret) {
+		if (new_device)
+			intel_xpcie_remove_device(xdev);
+		return ret;
+	}
+
+	if (new_device)
+		intel_xpcie_list_add_device(xdev);
+
+	return ret;
+}
+
+static void intel_xpcie_remove(struct pci_dev *pdev)
+{
+	struct xpcie_dev *xdev = pci_get_drvdata(pdev);
+
+	if (xdev) {
+		intel_xpcie_pci_cleanup(xdev);
+		intel_xpcie_list_del_device(xdev);
+		intel_xpcie_remove_device(xdev);
+	}
+}
+
+static struct pci_driver xpcie_driver = {
+	.name = XPCIE_DRIVER_NAME,
+	.id_table = xpcie_pci_table,
+	.probe = intel_xpcie_probe,
+	.remove = intel_xpcie_remove
+};
+
+static int __init intel_xpcie_init_module(void)
+{
+	return pci_register_driver(&xpcie_driver);
+}
+
+static void __exit intel_xpcie_exit_module(void)
+{
+	pci_unregister_driver(&xpcie_driver);
+}
+
+module_init(intel_xpcie_init_module);
+module_exit(intel_xpcie_exit_module);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION(XPCIE_DRIVER_DESC);
diff --git a/drivers/misc/xlink-pcie/remote_host/pci.c b/drivers/misc/xlink-pcie/remote_host/pci.c
new file mode 100644
index 000000000000..7b94575ef997
--- /dev/null
+++ b/drivers/misc/xlink-pcie/remote_host/pci.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "pci.h"
+
+#include "../common/core.h"
+#include "../common/util.h"
+
+static int aspm_enable;
+module_param(aspm_enable, int, 0664);
+MODULE_PARM_DESC(aspm_enable, "enable ASPM");
+
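+/* All probed Keem Bay devices, protected by dev_list_mutex. */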
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_mutex);
+
+struct xpcie_dev *intel_xpcie_get_device_by_id(u32 id)
+{
+	struct xpcie_dev *xdev;
+
+	mutex_lock(&dev_list_mutex);
+
+	list_for_each_entry(xdev, &dev_list, list) {
+		if (xdev->devid == id) {
+			mutex_unlock(&dev_list_mutex);
+			return xdev;
+		}
+	}
+
+	mutex_unlock(&dev_list_mutex);
+
+	return NULL;
+}
+
+struct xpcie_dev *intel_xpcie_create_device(u32 sw_device_id,
+					    struct pci_dev *pdev)
+{
+	struct xpcie_dev *xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+
+	if (!xdev)
+		return NULL;
+
+	xdev->devid = sw_device_id;
+	snprintf(xdev->name, XPCIE_MAX_NAME_LEN, "%02x:%02x.%x",
+		 pdev->bus->number,
+		 PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	mutex_init(&xdev->lock);
+
+	return xdev;
+}
+
+void intel_xpcie_remove_device(struct xpcie_dev *xdev)
+{
+	mutex_destroy(&xdev->lock);
+	kfree(xdev);
+}
+
+void intel_xpcie_list_add_device(struct xpcie_dev *xdev)
+{
+	mutex_lock(&dev_list_mutex);
+
+	list_add_tail(&xdev->list, &dev_list);
+
+	mutex_unlock(&dev_list_mutex);
+}
+
+void intel_xpcie_list_del_device(struct xpcie_dev *xdev)
+{
+	mutex_lock(&dev_list_mutex);
+
+	list_del(&xdev->list);
+
+	mutex_unlock(&dev_list_mutex);
+}
+
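+/*
+ * Set the ASPM control bits in the PCIe Link Control register; @aspm is
+ * the raw PCI_EXP_LNKCTL_ASPMC field value, 0 disables ASPM.
+ */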
+static void intel_xpcie_pci_set_aspm(struct xpcie_dev *xdev, int aspm)
+{
+	if (!pci_is_pcie(xdev->pci)) {
+		dev_err(&xdev->pci->dev, "failed to find pcie capability\n");
+		return;
+	}
+
+	pcie_capability_clear_and_set_word(xdev->pci, PCI_EXP_LNKCTL,
+					   PCI_EXP_LNKCTL_ASPMC,
+					   aspm & PCI_EXP_LNKCTL_ASPMC);
+}
+
+static void intel_xpcie_pci_unmap_bar(struct xpcie_dev *xdev)
+{
+	if (xdev->xpcie.bar0) {
+		iounmap((void __iomem *)xdev->xpcie.bar0);
+		xdev->xpcie.bar0 = NULL;
+	}
+
+	if (xdev->xpcie.mmio) {
+		iounmap((void __iomem *)(xdev->xpcie.mmio - XPCIE_MMIO_OFFSET));
+		xdev->xpcie.mmio = NULL;
+	}
+
+	if (xdev->xpcie.bar4) {
+		iounmap((void __iomem *)xdev->xpcie.bar4);
+		xdev->xpcie.bar4 = NULL;
+	}
+}
+
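+/*
+ * Map the BARs exposed by the Keem Bay endpoint: BAR0 for device
+ * registers, BAR2 for the shared communication area (at least
+ * XPCIE_IO_COMM_SIZE bytes, with the MMIO region starting at
+ * XPCIE_MMIO_OFFSET) and BAR4, mapped write-combined, for bulk data.
+ */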
+static int intel_xpcie_pci_map_bar(struct xpcie_dev *xdev)
+{
+	void __iomem *bar2;
+
+	if (pci_resource_len(xdev->pci, 2) < XPCIE_IO_COMM_SIZE) {
+		dev_err(&xdev->pci->dev, "device BAR region is too small\n");
+		return -EIO;
+	}
+
+	xdev->xpcie.bar0 = (void __force *)pci_ioremap_bar(xdev->pci, 0);
+	if (!xdev->xpcie.bar0) {
+		dev_err(&xdev->pci->dev, "failed to ioremap BAR0\n");
+		goto bar_error;
+	}
+
+	bar2 = pci_ioremap_bar(xdev->pci, 2);
+	if (!bar2) {
+		dev_err(&xdev->pci->dev, "failed to ioremap BAR2\n");
+		goto bar_error;
+	}
+	xdev->xpcie.mmio = (void __force *)bar2 + XPCIE_MMIO_OFFSET;
+
+	xdev->xpcie.bar4 = (void __force *)pci_ioremap_wc_bar(xdev->pci, 4);
+	if (!xdev->xpcie.bar4) {
+		dev_err(&xdev->pci->dev, "failed to ioremap BAR4\n");
+		goto bar_error;
+	}
+
+	return 0;
+
+bar_error:
+	intel_xpcie_pci_unmap_bar(xdev);
+	return -EIO;
+}
+
+static void intel_xpcie_pci_irq_cleanup(struct xpcie_dev *xdev)
+{
+	int irq = pci_irq_vector(xdev->pci, 0);
+
+	if (irq < 0)
+		return;
+
+	synchronize_irq(irq);
+	free_irq(irq, xdev);
+	pci_free_irq_vectors(xdev->pci);
+}
+
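+/* Allocate a single MSI vector and install the given interrupt handler. */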
+static int intel_xpcie_pci_irq_init(struct xpcie_dev *xdev,
+				    irq_handler_t irq_handler)
+{
+	int rc, irq;
+
+	rc = pci_alloc_irq_vectors(xdev->pci, 1, 1, PCI_IRQ_MSI);
+	if (rc < 0) {
+		dev_err(&xdev->pci->dev,
+			"failed to allocate %d MSI vectors\n", 1);
+		return rc;
+	}
+
+	irq = pci_irq_vector(xdev->pci, 0);
+	if (irq < 0) {
+		dev_err(&xdev->pci->dev, "failed to get irq\n");
+		rc = irq;
+		goto error_irq;
+	}
+	rc = request_irq(irq, irq_handler, 0,
+			 XPCIE_DRIVER_NAME, xdev);
+	if (rc) {
+		dev_err(&xdev->pci->dev, "failed to request irq\n");
+		goto error_irq;
+	}
+
+	return 0;
+
+error_irq:
+	pci_free_irq_vectors(xdev->pci);
+	return rc;
+}
+
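+/*
+ * Poll the device status word in BAR2 MMIO every 100 ms until the
+ * device reports at least XPCIE_STATUS_RUN, then mark the link ready.
+ */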
+static void xpcie_device_poll(struct work_struct *work)
+{
+	struct xpcie_dev *xdev = container_of(work, struct xpcie_dev,
+					      wait_event.work);
+	u32 dev_status = intel_xpcie_ioread32(xdev->xpcie.mmio +
+					      XPCIE_MMIO_DEV_STATUS);
+
+	if (dev_status < XPCIE_STATUS_RUN)
+		schedule_delayed_work(&xdev->wait_event,
+				      msecs_to_jiffies(100));
+	else
+		xdev->xpcie.status = XPCIE_STATUS_READY;
+}
+
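+/*
+ * Quiesce the device ahead of a reset: drop the core IRQ callback, mark
+ * the link off and, if @notify is set, send a reset request doorbell.
+ */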
+static int intel_xpcie_pci_prepare_dev_reset(struct xpcie_dev *xdev,
+					     bool notify)
+{
+	if (mutex_lock_interruptible(&xdev->lock))
+		return -EINTR;
+
+	if (xdev->core_irq_callback)
+		xdev->core_irq_callback = NULL;
+
+	xdev->xpcie.status = XPCIE_STATUS_OFF;
+	if (notify)
+		intel_xpcie_pci_raise_irq(xdev, DEV_EVENT, REQUEST_RESET);
+
+	mutex_unlock(&xdev->lock);
+
+	return 0;
+}
+
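+/* Deferred shutdown work: quiesce the device without notifying it. */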
+static void xpcie_device_shutdown(struct work_struct *work)
+{
+	struct xpcie_dev *xdev = container_of(work, struct xpcie_dev,
+					      shutdown_event.work);
+
+	intel_xpcie_pci_prepare_dev_reset(xdev, false);
+}
+
+static int xpcie_device_init(struct xpcie_dev *xdev)
+{
+	INIT_DELAYED_WORK(&xdev->wait_event, xpcie_device_poll);
+	INIT_DELAYED_WORK(&xdev->shutdown_event, xpcie_device_shutdown);
+
+	pci_set_master(xdev->pci);
+
+	xdev->xpcie.status = XPCIE_STATUS_UNINIT;
+
+	init_waitqueue_head(&xdev->waitqueue);
+	schedule_delayed_work(&xdev->wait_event, 0);
+
+	return 0;
+}
+
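+/*
+ * Bring up the PCI side of the device: enable it, claim and map its
+ * BARs, set a 64-bit DMA mask, apply the ASPM module parameter and
+ * start polling for the device to come up.
+ */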
+int intel_xpcie_pci_init(struct xpcie_dev *xdev, struct pci_dev *pdev)
+{
+	int rc;
+
+	if (mutex_lock_interruptible(&xdev->lock))
+		return -EINTR;
+
+	xdev->pci = pdev;
+	pci_set_drvdata(pdev, xdev);
+
+	rc = pci_enable_device_mem(xdev->pci);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to enable pci device\n");
+		goto error_exit;
+	}
+
+	rc = pci_request_regions(xdev->pci, XPCIE_DRIVER_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request mmio regions\n");
+		goto error_req_mem;
+	}
+
+	rc = intel_xpcie_pci_map_bar(xdev);
+	if (rc)
+		goto error_map;
+
+	rc = dma_set_mask_and_coherent(&xdev->pci->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set dma mask\n");
+		goto error_dma_mask;
+	}
+
+	intel_xpcie_pci_set_aspm(xdev, aspm_enable);
+
+	rc = xpcie_device_init(xdev);
+	if (!rc)
+		goto init_exit;
+
+error_dma_mask:
+	intel_xpcie_pci_unmap_bar(xdev);
+
+error_map:
+	pci_release_regions(xdev->pci);
+
+error_req_mem:
+	pci_disable_device(xdev->pci);
+
+error_exit:
+	xdev->xpcie.status = XPCIE_STATUS_ERROR;
+
+init_exit:
+	mutex_unlock(&xdev->lock);
+	return rc;
+}
+
+int intel_xpcie_pci_cleanup(struct xpcie_dev *xdev)
+{
+	if (mutex_lock_interruptible(&xdev->lock))
+		return -EINTR;
+
+	cancel_delayed_work(&xdev->wait_event);
+	cancel_delayed_work(&xdev->shutdown_event);
+	xdev->core_irq_callback = NULL;
+	intel_xpcie_pci_irq_cleanup(xdev);
+
+	intel_xpcie_pci_unmap_bar(xdev);
+	pci_release_regions(xdev->pci);
+	pci_disable_device(xdev->pci);
+	pci_set_drvdata(xdev->pci, NULL);
+	xdev->xpcie.status = XPCIE_STATUS_OFF;
+	xdev->irq_enabled = false;
+
+	mutex_unlock(&xdev->lock);
+
+	return 0;
+}
+
+int intel_xpcie_pci_register_irq(struct xpcie_dev *xdev,
+				 irq_handler_t irq_handler)
+{
+	int rc;
+
+	if (xdev->xpcie.status != XPCIE_STATUS_READY)
+		return -EINVAL;
+
+	rc = intel_xpcie_pci_irq_init(xdev, irq_handler);
+	if (rc)
+		dev_warn(&xdev->pci->dev, "failed to initialize pci irq\n");
+
+	return rc;
+}
+
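+/*
+ * Ring a doorbell on the device.  The PCI_STATUS config read flushes
+ * previously posted writes; the doorbell write itself is expected to be
+ * added along with the core communication logic.
+ */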
+int intel_xpcie_pci_raise_irq(struct xpcie_dev *xdev,
+			      enum xpcie_doorbell_type type,
+			      u8 value)
+{
+	u16 pci_status;
+
+	pci_read_config_word(xdev->pci, PCI_STATUS, &pci_status);
+
+	return 0;
+}
+
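+/*
+ * Copy the software device IDs of all known devices into @id_list and
+ * return the count; the caller must supply an array large enough for
+ * every attached device.
+ */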
+u32 intel_xpcie_get_device_num(u32 *id_list)
+{
+	struct xpcie_dev *p;
+	u32 num = 0;
+
+	mutex_lock(&dev_list_mutex);
+
+	list_for_each_entry(p, &dev_list, list) {
+		*id_list++ = p->devid;
+		num++;
+	}
+	mutex_unlock(&dev_list_mutex);
+
+	return num;
+}
+
+int intel_xpcie_get_device_name_by_id(u32 id,
+				      char *device_name, size_t name_size)
+{
+	struct xpcie_dev *xdev;
+	size_t size;
+
+	xdev = intel_xpcie_get_device_by_id(id);
+	if (!xdev)
+		return -ENODEV;
+
+	mutex_lock(&xdev->lock);
+
+	size = min_t(size_t, name_size, XPCIE_MAX_NAME_LEN);
+	memcpy(device_name, xdev->name, size);
+
+	mutex_unlock(&xdev->lock);
+
+	return 0;
+}
+
+int intel_xpcie_get_device_status_by_id(u32 id, u32 *status)
+{
+	struct xpcie_dev *xdev = intel_xpcie_get_device_by_id(id);
+
+	if (!xdev)
+		return -ENODEV;
+
+	mutex_lock(&xdev->lock);
+	*status = xdev->xpcie.status;
+	mutex_unlock(&xdev->lock);
+
+	return 0;
+}
+
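+/*
+ * A device can be connected only once it reports XPCIE_STATUS_READY;
+ * an already-running link counts as success.
+ */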
+int intel_xpcie_pci_connect_device(u32 id)
+{
+	struct xpcie_dev *xdev;
+	int rc = 0;
+
+	xdev = intel_xpcie_get_device_by_id(id);
+	if (!xdev)
+		return -ENODEV;
+
+	if (mutex_lock_interruptible(&xdev->lock))
+		return -EINTR;
+
+	if (xdev->xpcie.status == XPCIE_STATUS_RUN)
+		goto connect_cleanup;
+
+	if (xdev->xpcie.status == XPCIE_STATUS_OFF) {
+		rc = -ENODEV;
+		goto connect_cleanup;
+	}
+
+	if (xdev->xpcie.status != XPCIE_STATUS_READY) {
+		rc = -EBUSY;
+		goto connect_cleanup;
+	}
+
+connect_cleanup:
+	mutex_unlock(&xdev->lock);
+	return rc;
+}
diff --git a/drivers/misc/xlink-pcie/remote_host/pci.h b/drivers/misc/xlink-pcie/remote_host/pci.h
new file mode 100644
index 000000000000..bd6b01cc58b8
--- /dev/null
+++ b/drivers/misc/xlink-pcie/remote_host/pci.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef XPCIE_PCI_HEADER_
+#define XPCIE_PCI_HEADER_
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/xlink_drv_inf.h>
+#include "../common/xpcie.h"
+#include "../common/util.h"
+
+#define XPCIE_DRIVER_NAME "mxlk"
+#define XPCIE_DRIVER_DESC "Intel(R) Keem Bay XLink PCIe driver"
+
+#define XPCIE_MAX_NAME_LEN	(32)
+
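+/* Remote-host state for one Keem Bay PCIe function. */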
+struct xpcie_dev {
+	struct list_head list;
+	struct mutex lock; /* Device Lock */
+
+	struct pci_dev *pci;
+	char name[XPCIE_MAX_NAME_LEN];
+	u32 devid;
+	char fw_name[XPCIE_MAX_NAME_LEN];
+
+	struct delayed_work wait_event;
+	struct delayed_work shutdown_event;
+	wait_queue_head_t waitqueue;
+	bool irq_enabled;
+	irq_handler_t core_irq_callback;
+
+	struct xpcie xpcie;
+};
+
+static inline struct device *xpcie_to_dev(struct xpcie *xpcie)
+{
+	struct xpcie_dev *xdev = container_of(xpcie, struct xpcie_dev, xpcie);
+
+	return &xdev->pci->dev;
+}
+
+int intel_xpcie_pci_init(struct xpcie_dev *xdev, struct pci_dev *pdev);
+int intel_xpcie_pci_cleanup(struct xpcie_dev *xdev);
+int intel_xpcie_pci_register_irq(struct xpcie_dev *xdev,
+				 irq_handler_t irq_handler);
+int intel_xpcie_pci_raise_irq(struct xpcie_dev *xdev,
+			      enum xpcie_doorbell_type type,
+			      u8 value);
+
+struct xpcie_dev *intel_xpcie_create_device(u32 sw_device_id,
+					    struct pci_dev *pdev);
+void intel_xpcie_remove_device(struct xpcie_dev *xdev);
+void intel_xpcie_list_add_device(struct xpcie_dev *xdev);
+void intel_xpcie_list_del_device(struct xpcie_dev *xdev);
+
+#endif /* XPCIE_PCI_HEADER_ */
-- 
2.17.1

