From: Lizhi Hou <lizhi.hou@xilinx.com>
To: <linux-kernel@vger.kernel.org>
Cc: Lizhi Hou <lizhi.hou@xilinx.com>, <linux-fpga@vger.kernel.org>,
	<maxz@xilinx.com>, <sonal.santan@xilinx.com>, <yliu@xilinx.com>,
	<michal.simek@xilinx.com>, <stefanos@xilinx.com>,
	<devicetree@vger.kernel.org>, <trix@redhat.com>, <mdf@kernel.org>,
	<robh@kernel.org>, Max Zhen <max.zhen@xilinx.com>
Subject: [PATCH V5 XRT Alveo 09/20] fpga: xrt: management physical function driver (root)
Date: Tue, 27 Apr 2021 13:54:20 -0700	[thread overview]
Message-ID: <20210427205431.23896-10-lizhi.hou@xilinx.com> (raw)
In-Reply-To: <20210427205431.23896-1-lizhi.hou@xilinx.com>

Add the PCIe device driver that attaches to the management function on
Alveo devices. It instantiates one or more group drivers which, in turn,
instantiate xrt drivers. The instantiation of the group and xrt drivers
is driven entirely by device tree blob (dtb) metadata.
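
A condensed sketch of that dtb driven flow, built only from helpers used
in this patch and the metadata/root infrastructure patches earlier in the
series; the example_bringup() wrapper is hypothetical and error handling
is trimmed:

/* Condensed sketch of the flow; example_bringup() is illustrative only. */
static int example_bringup(struct xmgnt *xm)
{
	char *dtb = NULL;
	int ret;

	/* start from an empty in-memory dtb */
	ret = xrt_md_create(XMGNT_DEV(xm), &dtb);
	if (ret)
		return ret;

	/* record the VSEC BAR index/offset found in PCIe config space */
	ret = xmgnt_add_vsec_node(xm, dtb);
	if (!ret)
		ret = xroot_add_simple_node(xm->root, dtb, XRT_MD_NODE_MGNT_MAIN);
	if (ret)
		goto out;

	/* the group driver walks the dtb and instantiates the xrt drivers */
	ret = xroot_create_group(xm->root, dtb);
out:
	vfree(dtb);
	return ret;
}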

Signed-off-by: Sonal Santan <sonal.santan@xilinx.com>
Signed-off-by: Max Zhen <max.zhen@xilinx.com>
Signed-off-by: Lizhi Hou <lizhi.hou@xilinx.com>
---
 drivers/fpga/xrt/mgnt/root.c | 419 +++++++++++++++++++++++++++++++++++
 1 file changed, 419 insertions(+)
 create mode 100644 drivers/fpga/xrt/mgnt/root.c

diff --git a/drivers/fpga/xrt/mgnt/root.c b/drivers/fpga/xrt/mgnt/root.c
new file mode 100644
index 000000000000..6e362e9d4b59
--- /dev/null
+++ b/drivers/fpga/xrt/mgnt/root.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Alveo Management Function Driver
+ *
+ * Copyright (C) 2020-2021 Xilinx, Inc.
+ *
+ * Authors:
+ *	Cheng Zhen <maxz@xilinx.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "xroot.h"
+#include "xmgnt.h"
+#include "metadata.h"
+
+#define XMGNT_MODULE_NAME	"xrt-mgnt"
+#define XMGNT_DRIVER_VERSION	"4.0.0"
+
+#define XMGNT_PDEV(xm)		((xm)->pdev)
+#define XMGNT_DEV(xm)		(&(XMGNT_PDEV(xm)->dev))
+#define xmgnt_err(xm, fmt, args...)	\
+	dev_err(XMGNT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgnt_warn(xm, fmt, args...)	\
+	dev_warn(XMGNT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgnt_info(xm, fmt, args...)	\
+	dev_info(XMGNT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define xmgnt_dbg(xm, fmt, args...)	\
+	dev_dbg(XMGNT_DEV(xm), "%s: " fmt, __func__, ##args)
+#define XMGNT_DEV_ID(_pcidev)			\
+	({ typeof(_pcidev) (pcidev) = (_pcidev);	\
+	((pci_domain_nr((pcidev)->bus) << 16) |	\
+	PCI_DEVID((pcidev)->bus->number, 0)); })
+#define XRT_VSEC_ID		0x20
+#define XRT_MAX_READRQ		512
+
+static struct class *xmgnt_class;
+
+/* PCI Device IDs */
+/*
+ * Golden image is preloaded on the device when it is shipped to customer.
+ * Then, customer can load other shells (from Xilinx or some other vendor).
+ * If something goes wrong with the shell, customer can always go back to
+ * golden and start over again.
+ */
+#define PCI_DEVICE_ID_U50_GOLDEN	0xD020
+#define PCI_DEVICE_ID_U50		0x5020
+static const struct pci_device_id xmgnt_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50_GOLDEN), }, /* Alveo U50 (golden) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_U50), }, /* Alveo U50 */
+	{ 0, }
+};
+
+struct xmgnt {
+	struct pci_dev *pdev;
+	void *root;
+
+	bool ready;
+};
+
+static int xmgnt_config_pci(struct xmgnt *xm)
+{
+	struct pci_dev *pdev = XMGNT_PDEV(xm);
+	int rc;
+
+	rc = pcim_enable_device(pdev);
+	if (rc < 0) {
+		xmgnt_err(xm, "failed to enable device: %d", rc);
+		return rc;
+	}
+
+	rc = pci_enable_pcie_error_reporting(pdev);
+	if (rc)
+		xmgnt_warn(xm, "failed to enable AER: %d", rc);
+
+	pci_set_master(pdev);
+
+	rc = pcie_get_readrq(pdev);
+	if (rc > XRT_MAX_READRQ)
+		pcie_set_readrq(pdev, XRT_MAX_READRQ);
+	return 0;
+}
+
+static int xmgnt_match_slot_and_save(struct device *dev, void *data)
+{
+	struct xmgnt *xm = data;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (XMGNT_DEV_ID(pdev) == XMGNT_DEV_ID(xm->pdev)) {
+		pci_cfg_access_lock(pdev);
+		pci_save_state(pdev);
+	}
+
+	return 0;
+}
+
+static void xmgnt_pci_save_config_all(struct xmgnt *xm)
+{
+	bus_for_each_dev(&pci_bus_type, NULL, xm, xmgnt_match_slot_and_save);
+}
+
+static int xmgnt_match_slot_and_restore(struct device *dev, void *data)
+{
+	struct xmgnt *xm = data;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (XMGNT_DEV_ID(pdev) == XMGNT_DEV_ID(xm->pdev)) {
+		pci_restore_state(pdev);
+		pci_cfg_access_unlock(pdev);
+	}
+
+	return 0;
+}
+
+static void xmgnt_pci_restore_config_all(struct xmgnt *xm)
+{
+	bus_for_each_dev(&pci_bus_type, NULL, xm, xmgnt_match_slot_and_restore);
+}
+
+static void xmgnt_root_hot_reset(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_bus *bus;
+	u16 pci_cmd, devctl;
+	struct xmgnt *xm;
+	u8 pci_bctl;
+	int i, ret;
+
+	xm = pci_get_drvdata(pdev);
+	xmgnt_info(xm, "hot reset start");
+	xmgnt_pci_save_config_all(xm);
+	pci_disable_device(pdev);
+	bus = pdev->bus;
+
+	/*
+	 * When flipping the SBR bit, device can fall off the bus. This is
+	 * usually no problem at all so long as drivers are working properly
+	 * after SBR. However, some systems complain bitterly when the device
+	 * falls off the bus.
+	 * The quick solution is to temporarily disable the SERR reporting of
+	 * switch port during SBR.
+	 */
+
+	pci_read_config_word(bus->self, PCI_COMMAND, &pci_cmd);
+	pci_write_config_word(bus->self, PCI_COMMAND, (pci_cmd & ~PCI_COMMAND_SERR));
+	pcie_capability_read_word(bus->self, PCI_EXP_DEVCTL, &devctl);
+	pcie_capability_write_word(bus->self, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_FERE));
+	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
+	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl | PCI_BRIDGE_CTL_BUS_RESET);
+	msleep(100);
+	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
+	ssleep(1);
+
+	pcie_capability_write_word(bus->self, PCI_EXP_DEVCTL, devctl);
+	pci_write_config_word(bus->self, PCI_COMMAND, pci_cmd);
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		xmgnt_err(xm, "failed to enable device, ret %d", ret);
+
+	for (i = 0; i < 300; i++) {
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		if (pci_cmd != 0xffff)
+			break;
+		msleep(20);
+	}
+	if (i == 300)
+		xmgnt_err(xm, "timed out waiting for device to be online after reset");
+
+	xmgnt_info(xm, "waiting for %d ms", i * 20);
+	xmgnt_pci_restore_config_all(xm);
+	xmgnt_config_pci(xm);
+}
+
+static int xmgnt_add_vsec_node(struct xmgnt *xm, char *dtb)
+{
+	u32 off_low, off_high, vsec_bar, header;
+	struct pci_dev *pdev = XMGNT_PDEV(xm);
+	struct xrt_md_endpoint ep = { 0 };
+	struct device *dev = DEV(pdev);
+	int cap = 0, ret = 0;
+	u64 vsec_off;
+
+	while ((cap = pci_find_next_ext_capability(pdev, cap, PCI_EXT_CAP_ID_VNDR))) {
+		pci_read_config_dword(pdev, cap + PCI_VNDR_HEADER, &header);
+		if (PCI_VNDR_HEADER_ID(header) == XRT_VSEC_ID)
+			break;
+	}
+	if (!cap) {
+		xmgnt_info(xm, "No Vendor Specific Capability.");
+		return -ENOENT;
+	}
+
+	if (pci_read_config_dword(pdev, cap + 8, &off_low) ||
+	    pci_read_config_dword(pdev, cap + 12, &off_high)) {
+		xmgnt_err(xm, "pci_read vendor specific failed.");
+		return -EINVAL;
+	}
+
+	ep.ep_name = XRT_MD_NODE_VSEC;
+	ret = xrt_md_add_endpoint(dev, dtb, &ep);
+	if (ret) {
+		xmgnt_err(xm, "add vsec metadata failed, ret %d", ret);
+		goto failed;
+	}
+
+	vsec_bar = cpu_to_be32(off_low & 0xf);
+	ret = xrt_md_set_prop(dev, dtb, XRT_MD_NODE_VSEC, NULL,
+			      XRT_MD_PROP_BAR_IDX, &vsec_bar, sizeof(vsec_bar));
+	if (ret) {
+		xmgnt_err(xm, "add vsec bar idx failed, ret %d", ret);
+		goto failed;
+	}
+
+	vsec_off = cpu_to_be64(((u64)off_high << 32) | (off_low & ~0xfU));
+	ret = xrt_md_set_prop(dev, dtb, XRT_MD_NODE_VSEC, NULL,
+			      XRT_MD_PROP_OFFSET, &vsec_off, sizeof(vsec_off));
+	if (ret) {
+		xmgnt_err(xm, "add vsec offset failed, ret %d", ret);
+		goto failed;
+	}
+
+failed:
+	return ret;
+}
+
+static int xmgnt_create_root_metadata(struct xmgnt *xm, char **root_dtb)
+{
+	char *dtb = NULL;
+	int ret;
+
+	ret = xrt_md_create(XMGNT_DEV(xm), &dtb);
+	if (ret) {
+		xmgnt_err(xm, "create metadata failed, ret %d", ret);
+		goto failed;
+	}
+
+	ret = xmgnt_add_vsec_node(xm, dtb);
+	if (ret == -ENOENT) {
+		/*
+		 * We may be dealing with a MFG board.
+		 * Try vsec-golden which will bring up all hard-coded leaves
+		 * at hard-coded offsets.
+		 */
+		ret = xroot_add_simple_node(xm->root, dtb, XRT_MD_NODE_VSEC_GOLDEN);
+	} else if (ret == 0) {
+		ret = xroot_add_simple_node(xm->root, dtb, XRT_MD_NODE_MGNT_MAIN);
+	}
+	if (ret)
+		goto failed;
+
+	*root_dtb = dtb;
+	return 0;
+
+failed:
+	vfree(dtb);
+	return ret;
+}
+
+static ssize_t ready_show(struct device *dev,
+			  struct device_attribute *da,
+			  char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct xmgnt *xm = pci_get_drvdata(pdev);
+
+	return sprintf(buf, "%d\n", xm->ready);
+}
+static DEVICE_ATTR_RO(ready);
+
+static struct attribute *xmgnt_root_attrs[] = {
+	&dev_attr_ready.attr,
+	NULL
+};
+
+static struct attribute_group xmgnt_root_attr_group = {
+	.attrs = xmgnt_root_attrs,
+};
+
+static void xmgnt_root_get_id(struct device *dev, struct xrt_root_get_id *rid)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	rid->xpigi_vendor_id = pdev->vendor;
+	rid->xpigi_device_id = pdev->device;
+	rid->xpigi_sub_vendor_id = pdev->subsystem_vendor;
+	rid->xpigi_sub_device_id = pdev->subsystem_device;
+}
+
+static int xmgnt_root_get_resource(struct device *dev, struct xrt_root_get_res *res)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct xmgnt *xm;
+
+	xm = pci_get_drvdata(pdev);
+	if (res->xpigr_region_id > PCI_STD_RESOURCE_END) {
+		xmgnt_err(xm, "Invalid bar idx %d", res->xpigr_region_id);
+		return -EINVAL;
+	}
+
+	res->xpigr_res = &pdev->resource[res->xpigr_region_id];
+	return 0;
+}
+
+static struct xroot_physical_function_callback xmgnt_xroot_pf_cb = {
+	.xpc_get_id = xmgnt_root_get_id,
+	.xpc_get_resource = xmgnt_root_get_resource,
+	.xpc_hot_reset = xmgnt_root_hot_reset,
+};
+
+static int xmgnt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct xmgnt *xm = devm_kzalloc(dev, sizeof(*xm), GFP_KERNEL);
+	char *dtb = NULL;
+
+	if (!xm)
+		return -ENOMEM;
+	xm->pdev = pdev;
+	pci_set_drvdata(pdev, xm);
+
+	ret = xmgnt_config_pci(xm);
+	if (ret)
+		goto failed;
+
+	ret = xroot_probe(&pdev->dev, &xmgnt_xroot_pf_cb, &xm->root);
+	if (ret)
+		goto failed;
+
+	ret = xmgnt_create_root_metadata(xm, &dtb);
+	if (ret)
+		goto failed_metadata;
+
+	ret = xroot_create_group(xm->root, dtb);
+	vfree(dtb);
+	if (ret)
+		xmgnt_err(xm, "failed to create root group: %d", ret);
+
+	if (!xroot_wait_for_bringup(xm->root))
+		xmgnt_err(xm, "failed to bringup all groups");
+	else
+		xm->ready = true;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &xmgnt_root_attr_group);
+	if (ret) {
+		/* Warning instead of failing the probe. */
+		xmgnt_warn(xm, "create xmgnt root attrs failed: %d", ret);
+	}
+
+	xroot_broadcast(xm->root, XRT_EVENT_POST_CREATION);
+	xmgnt_info(xm, "%s started successfully", XMGNT_MODULE_NAME);
+	return 0;
+
+failed_metadata:
+	xroot_remove(xm->root);
+failed:
+	pci_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static void xmgnt_remove(struct pci_dev *pdev)
+{
+	struct xmgnt *xm = pci_get_drvdata(pdev);
+
+	xroot_broadcast(xm->root, XRT_EVENT_PRE_REMOVAL);
+	sysfs_remove_group(&pdev->dev.kobj, &xmgnt_root_attr_group);
+	xroot_remove(xm->root);
+	pci_disable_pcie_error_reporting(xm->pdev);
+	xmgnt_info(xm, "%s cleaned up successfully", XMGNT_MODULE_NAME);
+}
+
+static struct pci_driver xmgnt_driver = {
+	.name = XMGNT_MODULE_NAME,
+	.id_table = xmgnt_pci_ids,
+	.probe = xmgnt_probe,
+	.remove = xmgnt_remove,
+};
+
+static int __init xmgnt_init(void)
+{
+	int res = 0;
+
+	res = xmgnt_register_leaf();
+	if (res)
+		return res;
+
+	xmgnt_class = class_create(THIS_MODULE, XMGNT_MODULE_NAME);
+	if (IS_ERR(xmgnt_class))
+		return PTR_ERR(xmgnt_class);
+
+	res = pci_register_driver(&xmgnt_driver);
+	if (res) {
+		class_destroy(xmgnt_class);
+		return res;
+	}
+
+	return 0;
+}
+
+static __exit void xmgnt_exit(void)
+{
+	pci_unregister_driver(&xmgnt_driver);
+	class_destroy(xmgnt_class);
+	xmgnt_unregister_leaf();
+}
+
+module_init(xmgnt_init);
+module_exit(xmgnt_exit);
+
+MODULE_DEVICE_TABLE(pci, xmgnt_pci_ids);
+MODULE_VERSION(XMGNT_DRIVER_VERSION);
+MODULE_AUTHOR("XRT Team <runtime@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Alveo management function driver");
+MODULE_LICENSE("GPL v2");
-- 
2.27.0

