From: Mathieu Poirier <mathieu.poirier@linaro.org>
To: Ben Levinsky <BLEVINSK@xilinx.com>
Cc: "devicetree@vger.kernel.org" <devicetree@vger.kernel.org>,
	"linux-remoteproc@vger.kernel.org"
	<linux-remoteproc@vger.kernel.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-arm-kernel@lists.infradead.org" 
	<linux-arm-kernel@lists.infradead.org>,
	Michal Simek <michals@xilinx.com>,
	"Ed T. Mooring" <emooring@xilinx.com>
Subject: Re: [PATCH v26 5/5] remoteproc: Add initial zynqmp R5 remoteproc driver
Date: Mon, 15 Mar 2021 11:25:58 -0600	[thread overview]
Message-ID: <20210315172558.GA1342614@xps15> (raw)
In-Reply-To: <FF6E631A-87E0-4194-844A-E6B58E5B2928@xilinx.com>

>     > +
>     > +static void zynqmp_r5_cleanup_mbox(struct zynqmp_r5_rproc *z_rproc)
>     > +{
>     > +	mbox_free_channel(z_rproc->tx_chan);
>     > +	mbox_free_channel(z_rproc->rx_chan);
>     > +}
>     > +
>     > +/**
>     > + * zynqmp_r5_probe - Probes ZynqMP R5 processor device node
>     > + *		       this is called for each individual R5 core to
>     > + *		       set up mailbox, Xilinx platform manager unique ID,
>     > + *		       add to rproc core
> 
>     The above has changed since last time, which makes it harder for me to
>     review your work.  From hereon please change only the things I point out so that
>     we keep the same goal posts from one revision to the other.
> 
>     The tabulation needs to be fixed:  
> 
>             * zynqmp_r5_probe - Probes ZynqMP R5 processor device node
>             *
>             * This is called for each individual R5 core to set up mailbox, Xilinx
>             * platform manager unique ID, add to rproc core.
> 
>     The description is also broken.
> 
> [Ben] Ok. How is the following:
> /**                                                                                
>  * zynqmp_r5_probe - Probes ZynqMP R5 processor device node                        
>  *                                                                                 
>  * This is called for each individual R5 core to set up mailbox, Xilinx            
>  * platform manager unique ID, collect SRAM information and wire in                
>  * driver-specific data to rproc core.
>  *                                                                                 
>  * @pdev: domain platform device for current R5 core                               
>  * @node: pointer of the device node for current R5 core                           
>  * @rpu_mode: mode to configure RPU, split or lockstep                             
>  *                                                                                 
>  * Return: 0 for success, negative value for failure.                              

Much better

>  */                                                                                
> static struct zynqmp_r5_rproc *zynqmp_r5_probe(struct platform_device *pdev,       
>                                                struct device_node *node,           
>                                                enum rpu_oper_mode rpu_mode) 
> 
> 
>     > + *
>     > + * @pdev: domain platform device for current R5 core
>     > + * @node: pointer of the device node for current R5 core
>     > + * @rpu_mode: mode to configure RPU, split or lockstep
>     > + *
>     > + * Return: 0 for success, negative value for failure.
>     > + */
>     > +static struct zynqmp_r5_rproc *zynqmp_r5_probe(struct platform_device *pdev,
>     > +					       struct device_node *node,
>     > +					       enum rpu_oper_mode rpu_mode)
>     > +{
>     > +	int ret, num_banks;
>     > +	struct device *dev = &pdev->dev;
>     > +	struct rproc *rproc_ptr;
>     > +	struct zynqmp_r5_rproc *z_rproc;
>     > +	struct device_node *r5_node;
>     > +
>     > +	/* Allocate remoteproc instance */
>     > +	rproc_ptr = devm_rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops,
>     > +				     NULL, sizeof(struct zynqmp_r5_rproc));
>     > +	if (!rproc_ptr) {
>     > +		ret = -ENOMEM;
>     > +		goto error;
>     > +	}
>     > +
>     > +	rproc_ptr->auto_boot = false;
>     > +	z_rproc = rproc_ptr->priv;
>     > +	z_rproc->rproc = rproc_ptr;
>     > +	r5_node = z_rproc->rproc->dev.parent->of_node;
>     > +
>     > +	/* Set up DMA mask */
>     > +	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
>     > +	if (ret)
>     > +		goto error;
>     > +
>     > +	/* Get R5 power domain node */
>     > +	ret = of_property_read_u32(node, "power-domain", &z_rproc->pnode_id);
>     > +	if (ret)
>     > +		goto error;
>     > +
>     > +	ret = r5_set_mode(z_rproc, rpu_mode);
>     > +	if (ret)
>     > +		goto error;
>     > +
>     > +	if (of_property_read_bool(node, "mboxes")) {
>     > +		ret = zynqmp_r5_setup_mbox(z_rproc, node);
>     > +		if (ret)
>     > +			goto error;
>     > +	}
>     > +
>     > +	/* go through TCM banks for r5 node */
>     > +	num_banks = of_count_phandle_with_args(r5_node, BANK_LIST_PROP, NULL);
> 
>     Shouldn't this be @node instead of @r5_node?
> 
> [Ben]  Yes this should and will be node.
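
Right - for the record, the fixed-up line would then simply read (just a
sketch of the one-line change, nothing else in the loop needs to move):

	num_banks = of_count_phandle_with_args(node, BANK_LIST_PROP, NULL);
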
> 
>     > +	if (num_banks <= 0) {
>     > +		dev_err(dev, "need to specify TCM banks\n");
>     > +		ret = -EINVAL;
>     > +		goto error;
>     > +	}
>     > +
>     > +	if (num_banks > NUM_SRAMS) {
>     > +		dev_err(dev, "max number of srams is %d. given: %d \r\n",
>     > +			NUM_SRAMS, num_banks);
>     > +		ret = -EINVAL;
>     > +		goto error;
>     > +	}
>     > +
>     > +	/* construct collection of srams used by the current R5 core */
>     > +	for (; num_banks; num_banks--) {
>     > +		struct resource rsc;
>     > +		struct device_node *dt_node;
>     > +		resource_size_t size;
>     > +		int i;
>     > +
>     > +		dt_node = of_parse_phandle(r5_node, BANK_LIST_PROP, i);
>     > +		if (!dt_node) {
>     > +			ret = -EINVAL;
>     > +			goto error;
>     > +		}
>     > +
>     > +		ret = of_address_to_resource(dt_node, 0, &rsc);
>     > +		if (ret < 0) {
>     > +			of_node_put(dt_node);
>     > +			goto error;
>     > +		}
>     > +
>     > +		of_node_put(dt_node);
>     > +		size = resource_size(&rsc);
>     > +
>     > +		/*
>     > +		 * Find corresponding Xilinx platform management ID.
>     > +		 * The bank information is used in prepare/unprepare and
>     > +		 * parse_fw.
>     > +		 */
>     > +		for (i = 0; i < NUM_SRAMS; i++) {
>     > +			if (rsc.start == zynqmp_banks[i].addr) {
>     > +				z_rproc->srams[i].addr = rsc.start;
>     > +				z_rproc->srams[i].size = size;
>     > +				z_rproc->srams[i].id = zynqmp_banks[i].id;
>     > +				break;
>     > +			}
>     > +		}
>     > +
>     > +		if (i == NUM_SRAMS) {
>     > +			dev_err(dev, "sram %llx is not valid.\n", rsc.start);
>     > +			ret = -EINVAL;
>     > +			goto error;
>     > +		}
>     > +	}
> 
>     Everything that is related to the initialisation of srams above should be in a
>     function on its own.  This too is new code that wasn't requested - the next
>     revision needs to include *only* the changes I request.  Any improvement on the
>     current implementation can be made in future patchsets. 
> 
> 
> [Ben] Makes sense. I will do that going forward. For probe() I will put all the sram information collection functionality in 1 function.
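
For illustration only - the helper name, signature and error handling below
are assumptions on my part, not something taken from the patch - the
extraction could look roughly like this:

/* Hypothetical helper, name and signature assumed for illustration. */
static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_rproc *z_rproc,
				    struct device_node *node)
{
	struct device *dev = z_rproc->rproc->dev.parent;
	int num_banks, i, j, ret;

	num_banks = of_count_phandle_with_args(node, BANK_LIST_PROP, NULL);
	if (num_banks <= 0 || num_banks > NUM_SRAMS) {
		dev_err(dev, "invalid number of TCM banks: %d\n", num_banks);
		return -EINVAL;
	}

	for (i = 0; i < num_banks; i++) {
		struct device_node *dt_node;
		struct resource rsc;

		dt_node = of_parse_phandle(node, BANK_LIST_PROP, i);
		if (!dt_node)
			return -EINVAL;

		ret = of_address_to_resource(dt_node, 0, &rsc);
		of_node_put(dt_node);
		if (ret < 0)
			return ret;

		/* Map the bank to its Xilinx platform management ID. */
		for (j = 0; j < NUM_SRAMS; j++) {
			if (rsc.start == zynqmp_banks[j].addr) {
				z_rproc->srams[j].addr = rsc.start;
				z_rproc->srams[j].size = resource_size(&rsc);
				z_rproc->srams[j].id = zynqmp_banks[j].id;
				break;
			}
		}

		if (j == NUM_SRAMS) {
			dev_err(dev, "sram %llx is not valid\n", rsc.start);
			return -EINVAL;
		}
	}

	return 0;
}

with probe() then only doing "ret = zynqmp_r5_get_sram_banks(z_rproc, node);"
and jumping to the error label on failure.
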
> 
>     > +
>     > +	/* Add R5 remoteproc */
>     > +	ret = devm_rproc_add(dev, rproc_ptr);
>     > +	if (ret) {
>     > +		zynqmp_r5_cleanup_mbox(z_rproc);
>     > +		goto error;
>     > +	}
>     > +
>     > +	return z_rproc;
>     > +error:
>     > +	return ERR_PTR(ret);
>     > +}
>     > +
>     > +/*
>     > + * zynqmp_r5_remoteproc_probe
>     > + *
>     > + * @pdev: domain platform device for R5 cluster
>     > + *
>     > + * called when driver is probed, for each R5 core specified in DT,
>     > + * setup as needed to do remoteproc-related operations
>     > + *
>     > + * Return: 0 for success, negative value for failure.
>     > + */
>     > +static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
>     > +{
>     > +	int ret, core_count;
>     > +	struct device *dev = &pdev->dev;
>     > +	struct device_node *nc;
>     > +	enum rpu_oper_mode rpu_mode = PM_RPU_MODE_LOCKSTEP;
>     > +	struct list_head *cluster; /* list to track each core's rproc */
>     > +	struct zynqmp_r5_rproc *z_rproc;
>     > +	struct platform_device *child_pdev;
>     > +	struct list_head *pos;
>     > +
>     > +	ret = of_property_read_u32(dev->of_node, "xlnx,cluster-mode", &rpu_mode);
>     > +	if (ret < 0 || (rpu_mode != PM_RPU_MODE_LOCKSTEP &&
>     > +			rpu_mode != PM_RPU_MODE_SPLIT)) {
>     > +		dev_err(dev, "invalid cluster mode: ret %d mode %x\n",
>     > +			ret, rpu_mode);
>     > +		return ret;
>     > +	}
>     > +
>     > +	dev_dbg(dev, "RPU configuration: %s\n",
>     > +		rpu_mode == PM_RPU_MODE_LOCKSTEP ? "lockstep" : "split");
>     > +
>     > +	/*
>     > +	 * if 2 RPUs provided but one is lockstep, then we have an
>     > +	 * invalid configuration.
>     > +	 */
>     > +
>     > +	core_count = of_get_available_child_count(dev->of_node);
>     > +	if ((rpu_mode == PM_RPU_MODE_LOCKSTEP && core_count != 1) ||
>     > +	    core_count > MAX_RPROCS)
>     > +		return -EINVAL;
>     > +
>     > +	cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
>     > +	if (!cluster)
>     > +		return -ENOMEM;
>     > +	INIT_LIST_HEAD(cluster);
>     > +
>     > +	ret = devm_of_platform_populate(dev);
>     > +	if (ret) {
>     > +		dev_err(dev, "devm_of_platform_populate failed, ret = %d\n", ret);
>     > +		return ret;
>     > +	}
>     > +
>     > +	/* probe each individual r5 core's remoteproc-related info */
>     > +	for_each_available_child_of_node(dev->of_node, nc) {
>     > +		child_pdev = of_find_device_by_node(nc);
> 
>     The device reference needs to be dropped after use, as described in the function
>     documentation.
> 
>     I'm out of time - I will continue tomorrow.
> 
>     Mathieu
> 
> 
> [Ben] By this do you mean that each platform_device should have a call like
> 	platform_set_drvdata(child_pdev, NULL); if it fails, or something else?

Have another read at the documentation and look at how other people have used
it.  You may already be aware but Bootlin's kernel cross-reference tool is
really good for that.

https://elixir.bootlin.com/linux/v5.12-rc3/source
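
If it helps, a bare-bones version of what I had in mind looks something like
the snippet below - the surrounding error handling is purely illustrative,
the put_device() call is the point:

	/* probe each individual r5 core's remoteproc-related info */
	for_each_available_child_of_node(dev->of_node, nc) {
		child_pdev = of_find_device_by_node(nc);
		if (!child_pdev)
			continue; /* illustrative only */

		z_rproc = zynqmp_r5_probe(child_pdev, nc, rpu_mode);

		/*
		 * of_find_device_by_node() takes a reference on the embedded
		 * struct device; drop it once the child device is no longer
		 * needed.
		 */
		put_device(&child_pdev->dev);
	}

Exactly where the put_device() lands depends on how long the driver needs to
hold on to the child device.
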


Thread overview: 38+ messages

2021-02-23 15:44 [PATCH v26 0/5] Add initial zynqmp R5 remoteproc driver Ben Levinsky
2021-02-23 15:44 ` [PATCH v26 1/5] firmware: xilinx: Add ZynqMP firmware ioctl enums for RPU configuration Ben Levinsky
2021-02-23 15:44 ` [PATCH v26 2/5] firmware: xilinx: Add shutdown/wakeup APIs Ben Levinsky
2021-02-23 15:44 ` [PATCH v26 3/5] firmware: xilinx: Add RPU configuration APIs Ben Levinsky
2021-02-23 15:44 ` [PATCH v26 4/5] dt-bindings: remoteproc: Add documentation for ZynqMP R5 rproc bindings Ben Levinsky
2021-02-23 15:44 ` [PATCH v26 5/5] remoteproc: Add initial zynqmp R5 remoteproc driver Ben Levinsky
2021-03-08 19:00   ` Mathieu Poirier
2021-03-11 23:47     ` Ben Levinsky
2021-03-15 17:25       ` Mathieu Poirier [this message]
2021-03-15 21:42         ` Ben Levinsky
2021-03-17 16:22           ` Mathieu Poirier
2021-03-09 16:53   ` Mathieu Poirier
2021-03-11 23:49     ` Ben Levinsky
2021-03-15 17:37       ` Mathieu Poirier
2021-03-15 21:32         ` Ben Levinsky
2021-03-17 16:27           ` Mathieu Poirier
2021-03-19 17:46             ` Ben Levinsky
2021-03-22  0:10             ` Ben Levinsky
2021-02-23 16:34 ` [PATCH v26 0/5] " Mathieu Poirier