From: Tom Rix <trix@redhat.com>
To: Lizhi Hou <lizhi.hou@xilinx.com>,
	linux-kernel@vger.kernel.org, Max Zhen <max.zhen@xilinx.com>
Cc: linux-fpga@vger.kernel.org, maxz@xilinx.com,
	sonal.santan@xilinx.com, yliu@xilinx.com,
	michal.simek@xilinx.com, stefanos@xilinx.com,
	devicetree@vger.kernel.org, mdf@kernel.org, robh@kernel.org
Subject: Re: [PATCH V4 XRT Alveo 05/20] fpga: xrt: group platform driver
Date: Tue, 30 Mar 2021 05:52:39 -0700
Message-ID: <692776f1-ed9f-5013-a0bf-d6c97d355369@redhat.com>
In-Reply-To: <20210324052947.27889-6-lizhi.hou@xilinx.com>


On 3/23/21 10:29 PM, Lizhi Hou wrote:
> group driver that manages life cycle of a bunch of leaf driver instances
> and bridges them with root.
>
> Signed-off-by: Sonal Santan <sonal.santan@xilinx.com>
> Signed-off-by: Max Zhen <max.zhen@xilinx.com>
> Signed-off-by: Lizhi Hou <lizhi.hou@xilinx.com>
> ---
>  drivers/fpga/xrt/include/group.h |  25 +++
>  drivers/fpga/xrt/lib/group.c     | 286 +++++++++++++++++++++++++++++++
>  2 files changed, 311 insertions(+)
>  create mode 100644 drivers/fpga/xrt/include/group.h
>  create mode 100644 drivers/fpga/xrt/lib/group.c
>
> diff --git a/drivers/fpga/xrt/include/group.h b/drivers/fpga/xrt/include/group.h
> new file mode 100644
> index 000000000000..09e9d03f53fe
> --- /dev/null
> +++ b/drivers/fpga/xrt/include/group.h
> @@ -0,0 +1,25 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2021 Xilinx, Inc.
> + *
ok, the generic boilerplate has been removed
> + * Authors:
> + *	Cheng Zhen <maxz@xilinx.com>
> + */
> +
> +#ifndef _XRT_GROUP_H_
> +#define _XRT_GROUP_H_
> +
> +#include "xleaf.h"
move header to another patch
> +
> +/*
> + * Group driver leaf calls.
ok
> + */
> +enum xrt_group_leaf_cmd {
> +	XRT_GROUP_GET_LEAF = XRT_XLEAF_CUSTOM_BASE, /* See comments in xleaf.h */
ok
> +	XRT_GROUP_PUT_LEAF,
> +	XRT_GROUP_INIT_CHILDREN,
> +	XRT_GROUP_FINI_CHILDREN,
> +	XRT_GROUP_TRIGGER_EVENT,
> +};
> +
> +#endif	/* _XRT_GROUP_H_ */
> diff --git a/drivers/fpga/xrt/lib/group.c b/drivers/fpga/xrt/lib/group.c
> new file mode 100644
> index 000000000000..7b8716569641
> --- /dev/null
> +++ b/drivers/fpga/xrt/lib/group.c
> @@ -0,0 +1,286 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Xilinx Alveo FPGA Group Driver
> + *
> + * Copyright (C) 2020-2021 Xilinx, Inc.
> + *
> + * Authors:
> + *	Cheng Zhen <maxz@xilinx.com>
> + */
> +
> +#include <linux/mod_devicetable.h>
> +#include <linux/platform_device.h>
> +#include "xleaf.h"
> +#include "subdev_pool.h"
> +#include "group.h"
> +#include "metadata.h"
> +#include "lib-drv.h"
> +
> +#define XRT_GRP "xrt_group"
> +
> +struct xrt_group {
> +	struct platform_device *pdev;
> +	struct xrt_subdev_pool leaves;
> +	bool leaves_created;
> +	struct mutex lock; /* lock for group */
> +};
> +
> +static int xrt_grp_root_cb(struct device *dev, void *parg,
> +			   enum xrt_root_cmd cmd, void *arg)
ok
> +{
> +	int rc;
> +	struct platform_device *pdev =
> +		container_of(dev, struct platform_device, dev);
> +	struct xrt_group *xg = (struct xrt_group *)parg;
> +
> +	switch (cmd) {
> +	case XRT_ROOT_GET_LEAF_HOLDERS: {
> +		struct xrt_root_get_holders *holders =
> +			(struct xrt_root_get_holders *)arg;
> +		rc = xrt_subdev_pool_get_holders(&xg->leaves,
> +						 holders->xpigh_pdev,
> +						 holders->xpigh_holder_buf,
> +						 holders->xpigh_holder_buf_len);
> +		break;
> +	}
> +	default:
> +		/* Forward parent call to root. */
> +		rc = xrt_subdev_root_request(pdev, cmd, arg);
> +		break;
> +	}
> +
> +	return rc;
> +}
> +
> +/*
> + * Cut subdev's dtb from group's dtb based on passed-in endpoint descriptor.
> + * Return the subdev's dtb through dtbp, if found.
> + */
> +static int xrt_grp_cut_subdev_dtb(struct xrt_group *xg, struct xrt_subdev_endpoints *eps,
> +				  char *grp_dtb, char **dtbp)
> +{
> +	int ret, i, ep_count = 0;
> +	char *dtb = NULL;
> +
> +	ret = xrt_md_create(DEV(xg->pdev), &dtb);
> +	if (ret)
> +		return ret;
> +
> +	for (i = 0; eps->xse_names[i].ep_name || eps->xse_names[i].regmap_name; i++) {
> +		const char *ep_name = eps->xse_names[i].ep_name;
> +		const char *reg_name = eps->xse_names[i].regmap_name;
> +
> +		if (!ep_name)
> +			xrt_md_get_compatible_endpoint(DEV(xg->pdev), grp_dtb, reg_name, &ep_name);
> +		if (!ep_name)
> +			continue;
> +
> +		ret = xrt_md_copy_endpoint(DEV(xg->pdev), dtb, grp_dtb, ep_name, reg_name, NULL);
> +		if (ret)
> +			continue;
> +		xrt_md_del_endpoint(DEV(xg->pdev), grp_dtb, ep_name, reg_name);
> +		ep_count++;
> +	}
> +	/* Found enough endpoints, return the subdev's dtb. */
> +	if (ep_count >= eps->xse_min_ep) {
> +		*dtbp = dtb;
> +		return 0;
> +	}
> +
> +	/* Cleanup - Restore all endpoints that has been deleted, if any. */
> +	if (ep_count > 0) {
> +		xrt_md_copy_endpoint(DEV(xg->pdev), grp_dtb, dtb,
> +				     XRT_MD_NODE_ENDPOINTS, NULL, NULL);
> +	}
> +	vfree(dtb);
> +	*dtbp = NULL;
> +	return 0;
> +}
> +
> +static int xrt_grp_create_leaves(struct xrt_group *xg)
> +{
> +	struct xrt_subdev_platdata *pdata = DEV_PDATA(xg->pdev);
> +	struct xrt_subdev_endpoints *eps = NULL;
> +	int ret = 0, failed = 0;
> +	enum xrt_subdev_id did;
> +	char *grp_dtb = NULL;
> +	unsigned long mlen;
> +
> +	if (!pdata)
> +		return -EINVAL;
ok
> +
> +	mlen = xrt_md_size(DEV(xg->pdev), pdata->xsp_dtb);
> +	if (mlen == XRT_MD_INVALID_LENGTH) {
> +		xrt_err(xg->pdev, "invalid dtb, len %ld", mlen);
> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&xg->lock);
> +
> +	if (xg->leaves_created) {
> +		mutex_unlock(&xg->lock);
Add a comment here that this is not an error and/or that the error is handled elsewhere, something like the sketch below the quoted block.
> +		return -EEXIST;
> +	}
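Something like this would do (sketch only, exact wording is up to you; I am
assuming the caller is the one that treats -EEXIST as benign):

	if (xg->leaves_created) {
		/*
		 * Leaves were already brought up by an earlier call.
		 * This is not an error here; the caller handles -EEXIST.
		 */
		mutex_unlock(&xg->lock);
		return -EEXIST;
	}
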
> +
> +	grp_dtb = vmalloc(mlen);
> +	if (!grp_dtb) {
> +		mutex_unlock(&xg->lock);
> +		return -ENOMEM;
ok
> +	}
> +
> +	/* Create all leaves based on dtb. */
> +	xrt_info(xg->pdev, "bringing up leaves...");
> +	memcpy(grp_dtb, pdata->xsp_dtb, mlen);
> +	for (did = 0; did < XRT_SUBDEV_NUM; did++) {
ok
> +		eps = xrt_drv_get_endpoints(did);
> +		while (eps && eps->xse_names) {
> +			char *dtb = NULL;
> +
> +			ret = xrt_grp_cut_subdev_dtb(xg, eps, grp_dtb, &dtb);
> +			if (ret) {
> +				failed++;
> +				xrt_err(xg->pdev, "failed to cut subdev dtb for drv %s: %d",
> +					xrt_drv_name(did), ret);
> +			}
> +			if (!dtb) {
> +				/*
> +				 * No more dtb to cut or bad things happened for this instance,
> +				 * switch to the next one.
> +				 */
> +				eps++;
> +				continue;
> +			}
> +
> +			/* Found a dtb for this instance, let's add it. */
> +			ret = xrt_subdev_pool_add(&xg->leaves, did, xrt_grp_root_cb, xg, dtb);
> +			if (ret < 0) {
> +				failed++;
> +				xrt_err(xg->pdev, "failed to add %s: %d", xrt_drv_name(did), ret);

Add a comment that this is not a fatal error and that cleanup happens elsewhere, maybe something like the sketch below.

Tom

> +			}
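Sketch only; I am assuming xrt_grp_remove_leaves() is where the partially
created set gets torn down:

			ret = xrt_subdev_pool_add(&xg->leaves, did, xrt_grp_root_cb, xg, dtb);
			if (ret < 0) {
				/*
				 * Not a fatal error: count the failure and
				 * keep bringing up the remaining leaves;
				 * whatever was added is cleaned up later in
				 * xrt_grp_remove_leaves().
				 */
				failed++;
				xrt_err(xg->pdev, "failed to add %s: %d", xrt_drv_name(did), ret);
			}
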
> +			vfree(dtb);
> +			/* Continue searching for the same instance from grp_dtb. */
> +		}
> +	}
> +
> +	xg->leaves_created = true;
> +	vfree(grp_dtb);
> +	mutex_unlock(&xg->lock);
> +	return failed == 0 ? 0 : -ECHILD;
> +}
> +
> +static void xrt_grp_remove_leaves(struct xrt_group *xg)
> +{
> +	mutex_lock(&xg->lock);
> +
> +	if (!xg->leaves_created) {
> +		mutex_unlock(&xg->lock);
> +		return;
> +	}
> +
> +	xrt_info(xg->pdev, "tearing down leaves...");
> +	xrt_subdev_pool_fini(&xg->leaves);
> +	xg->leaves_created = false;
> +
> +	mutex_unlock(&xg->lock);
> +}
> +
> +static int xrt_grp_probe(struct platform_device *pdev)
> +{
> +	struct xrt_group *xg;
> +
> +	xrt_info(pdev, "probing...");
> +
> +	xg = devm_kzalloc(&pdev->dev, sizeof(*xg), GFP_KERNEL);
> +	if (!xg)
> +		return -ENOMEM;
> +
> +	xg->pdev = pdev;
> +	mutex_init(&xg->lock);
> +	xrt_subdev_pool_init(DEV(pdev), &xg->leaves);
> +	platform_set_drvdata(pdev, xg);
> +
> +	return 0;
> +}
> +
> +static int xrt_grp_remove(struct platform_device *pdev)
> +{
> +	struct xrt_group *xg = platform_get_drvdata(pdev);
> +
> +	xrt_info(pdev, "leaving...");
> +	xrt_grp_remove_leaves(xg);
> +	return 0;
> +}
> +
> +static int xrt_grp_leaf_call(struct platform_device *pdev, u32 cmd, void *arg)
> +{
> +	int rc = 0;
> +	struct xrt_group *xg = platform_get_drvdata(pdev);
> +
> +	switch (cmd) {
> +	case XRT_XLEAF_EVENT:
> +		/* Simply forward to every child. */
> +		xrt_subdev_pool_handle_event(&xg->leaves,
> +					     (struct xrt_event *)arg);
> +		break;
> +	case XRT_GROUP_GET_LEAF: {
> +		struct xrt_root_get_leaf *get_leaf =
> +			(struct xrt_root_get_leaf *)arg;
> +
> +		rc = xrt_subdev_pool_get(&xg->leaves, get_leaf->xpigl_match_cb,
> +					 get_leaf->xpigl_match_arg,
> +					 DEV(get_leaf->xpigl_caller_pdev),
> +					 &get_leaf->xpigl_tgt_pdev);
> +		break;
> +	}
> +	case XRT_GROUP_PUT_LEAF: {
> +		struct xrt_root_put_leaf *put_leaf =
> +			(struct xrt_root_put_leaf *)arg;
> +
> +		rc = xrt_subdev_pool_put(&xg->leaves, put_leaf->xpipl_tgt_pdev,
> +					 DEV(put_leaf->xpipl_caller_pdev));
> +		break;
> +	}
> +	case XRT_GROUP_INIT_CHILDREN:
> +		rc = xrt_grp_create_leaves(xg);
> +		break;
> +	case XRT_GROUP_FINI_CHILDREN:
> +		xrt_grp_remove_leaves(xg);
> +		break;
> +	case XRT_GROUP_TRIGGER_EVENT:
> +		xrt_subdev_pool_trigger_event(&xg->leaves, (enum xrt_events)(uintptr_t)arg);
> +		break;
> +	default:
> +		xrt_err(pdev, "unknown IOCTL cmd %d", cmd);
> +		rc = -EINVAL;
> +		break;
> +	}
> +	return rc;
> +}
> +
> +static struct xrt_subdev_drvdata xrt_grp_data = {
> +	.xsd_dev_ops = {
> +		.xsd_leaf_call = xrt_grp_leaf_call,
> +	},
> +};
> +
> +static const struct platform_device_id xrt_grp_id_table[] = {
> +	{ XRT_GRP, (kernel_ulong_t)&xrt_grp_data },
> +	{ },
> +};
> +
> +static struct platform_driver xrt_group_driver = {
> +	.driver	= {
> +		.name    = XRT_GRP,
> +	},
> +	.probe   = xrt_grp_probe,
> +	.remove  = xrt_grp_remove,
> +	.id_table = xrt_grp_id_table,
> +};
> +
> +void group_leaf_init_fini(bool init)
> +{
> +	if (init)
> +		xleaf_register_driver(XRT_SUBDEV_GRP, &xrt_group_driver, NULL);
> +	else
> +		xleaf_unregister_driver(XRT_SUBDEV_GRP);
> +}

