From: Mathieu Poirier <mathieu.poirier@linaro.org>
To: Suman Anna <s-anna@ti.com>
Cc: Bjorn Andersson <bjorn.andersson@linaro.org>,
	Rob Herring <robh+dt@kernel.org>,
	Lokesh Vutla <lokeshvutla@ti.com>,
	linux-remoteproc@vger.kernel.org, devicetree@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 4/4] remoteproc: k3-r5: Add loading support for on-chip SRAM regions
Date: Thu, 9 Jul 2020 13:51:11 -0600
Message-ID: <20200709195111.GC948668@xps15>
In-Reply-To: <20200630024922.32491-5-s-anna@ti.com>

On Mon, Jun 29, 2020 at 09:49:22PM -0500, Suman Anna wrote:
> The K3 SoCs have various internal on-chip SRAM memories like the SRAM
> within the MCU domain or the shared MSMC RAM within NavSS that can be
> used for multiple purposes. One such purpose is to have the R5F cores
> use a portion of such on-chip SRAM for fast-access data or to directly
> execute code.
> 
> Add support to the K3 R5 remoteproc driver to parse and support
> loading into such memories. The SRAM regions need to be mapped as
> normal non-cacheable memory to avoid kernel crashes when the remoteproc
> loader code uses the Arm64 memset library function (the "DC ZVA"
> instruction throws an alignment fault on Device-type memory).
> 
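
For anyone hitting this for the first time: the crash comes from the
memory type the mapping API selects, not from the SRAM itself. A
minimal sketch of the difference (hypothetical snippet, with 'dev' and
'res' as in the probe path below):

	void __iomem *va;

	/*
	 * devm_ioremap() gives a Device-type mapping on arm64; a
	 * memset() into it may execute DC ZVA and take an alignment
	 * fault.
	 */
	va = devm_ioremap(dev, res.start, resource_size(&res));
	memset((__force void *)va, 0, resource_size(&res));	/* can fault */

	/*
	 * devm_ioremap_wc() gives a Normal non-cacheable mapping,
	 * which DC ZVA accepts -- hence its use in this patch.
	 */
	va = devm_ioremap_wc(dev, res.start, resource_size(&res));
	memset((__force void *)va, 0, resource_size(&res));	/* safe */
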
> These SRAM regions are completely optional, as not all firmware images
> require them, and any such memory has to be reserved for the remote
> processor in the DTS files.
> 
> Signed-off-by: Suman Anna <s-anna@ti.com>
> ---
> v2:
>  - Adapted to use various devm_ functions resulting in a smaller patch
>  - As a result, the failure-path code in k3_r5_core_of_exit() and
>    k3_r5_core_of_get_sram_memories() is dropped
>  - Dropped unneeded whitespaces in a debug trace
>  - Revised the patch title to move away from remoteproc/k3-r5
>  - Dropped Mathieu's Acked-by because of the changes
> v1: https://patchwork.kernel.org/patch/11456373/
> 
>  drivers/remoteproc/ti_k3_r5_remoteproc.c | 79 ++++++++++++++++++++++++
>  1 file changed, 79 insertions(+)
> 
> diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> index aca0eaf42a38..ac8ae29f38aa 100644
> --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
> +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
> @@ -86,7 +86,9 @@ struct k3_r5_cluster {
>   * @dev: cached device pointer
>   * @rproc: rproc handle representing this core
>   * @mem: internal memory regions data
> + * @sram: on-chip SRAM memory regions data
>   * @num_mems: number of internal memory regions
> + * @num_sram: number of on-chip SRAM memory regions
>   * @reset: reset control handle
>   * @tsp: TI-SCI processor control handle
>   * @ti_sci: TI-SCI handle
> @@ -100,7 +102,9 @@ struct k3_r5_core {
>  	struct device *dev;
>  	struct rproc *rproc;
>  	struct k3_r5_mem *mem;
> +	struct k3_r5_mem *sram;
>  	int num_mems;
> +	int num_sram;
>  	struct reset_control *reset;
>  	struct ti_sci_proc *tsp;
>  	const struct ti_sci_handle *ti_sci;
> @@ -588,6 +592,18 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
>  		}
>  	}
>  
> +	/* handle any SRAM regions using SoC-view addresses */
> +	for (i = 0; i < core->num_sram; i++) {
> +		dev_addr = core->sram[i].dev_addr;
> +		size = core->sram[i].size;
> +
> +		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
> +			offset = da - dev_addr;
> +			va = core->sram[i].cpu_addr + offset;
> +			return (__force void *)va;
> +		}
> +	}
> +
>  	/* handle static DDR reserved memory regions */
>  	for (i = 0; i < kproc->num_rmems; i++) {
>  		dev_addr = kproc->rmem[i].dev_addr;
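
As a worked example of the range check above (addresses made up for
illustration): with dev_addr = 0x41c00000 and size = 0x80000, a lookup
for da = 0x41c10000 with len = 0x100 gives

	da >= dev_addr              : 0x41c10000 >= 0x41c00000  -> true
	da + len <= dev_addr + size : 0x41c10100 <= 0x41c80000  -> true
	offset = da - dev_addr      = 0x10000
	va     = cpu_addr + offset

so the firmware segment lands 64KB into the kernel's SRAM mapping.
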
> @@ -1030,6 +1046,63 @@ static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
>  	return 0;
>  }
>  
> +static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
> +					   struct k3_r5_core *core)
> +{
> +	struct device_node *np = pdev->dev.of_node;
> +	struct device *dev = &pdev->dev;
> +	struct device_node *sram_np;
> +	struct resource res;
> +	int num_sram;
> +	int i, ret;
> +
> +	num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
> +	if (num_sram <= 0) {
> +		dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
> +			num_sram);
> +		return 0;
> +	}
> +
> +	core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
> +	if (!core->sram)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < num_sram; i++) {
> +		sram_np = of_parse_phandle(np, "sram", i);
> +		if (!sram_np)
> +			return -EINVAL;
> +
> +		if (!of_device_is_available(sram_np)) {
> +			of_node_put(sram_np);
> +			return -EINVAL;
> +		}
> +
> +		ret = of_address_to_resource(sram_np, 0, &res);
> +		of_node_put(sram_np);
> +		if (ret)
> +			return -EINVAL;
> +
> +		core->sram[i].bus_addr = res.start;
> +		core->sram[i].dev_addr = res.start;
> +		core->sram[i].size = resource_size(&res);
> +		core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
> +							 resource_size(&res));
> +		if (!core->sram[i].cpu_addr) {
> +			dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
> +				i, &res.start);
> +			return -ENOMEM;
> +		}
> +
> +		dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
> +			i, &core->sram[i].bus_addr,
> +			core->sram[i].size, core->sram[i].cpu_addr,
> +			core->sram[i].dev_addr);
> +	}
> +	core->num_sram = num_sram;
> +
> +	return 0;
> +}
> +
>  static
>  struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
>  					  const struct ti_sci_handle *sci)
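
For completeness, the "sram" property this function walks is the
standard list of phandles to children of an mmio-sram node, per the
R5F binding earlier in this series. A hypothetical DTS fragment
(labels and addresses are made up):

	/* carve a chunk out of the MCU-domain SRAM ... */
	&mcu_ram {
		mcu_r5fss0_core0_sram: r5f-sram@0 {
			reg = <0x0 0x40000>;
		};
	};

	/* ... and hand it to the R5F core node */
	&mcu_r5fss0_core0 {
		sram = <&mcu_r5fss0_core0_sram>;
	};
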
> @@ -1143,6 +1216,12 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
>  		goto err;
>  	}
>  
> +	ret = k3_r5_core_of_get_sram_memories(pdev, core);
> +	if (ret) {
> +		dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
> +		goto err;
> +	}
> +

Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org>

>  	ret = ti_sci_proc_request(core->tsp);
>  	if (ret < 0) {
>  		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
> -- 
> 2.26.0
> 
