From: Tanmay Shah <tanmay.shah@amd.com>
To: <andersson@kernel.org>, <mathieu.poirier@linaro.org>,
	<robh+dt@kernel.org>, <krzysztof.kozlowski+dt@linaro.org>,
	<conor+dt@kernel.org>, <michal.simek@amd.com>,
	<ben.levinsky@amd.com>, <tanmay.shah@amd.com>
Cc: <linux-remoteproc@vger.kernel.org>, <devicetree@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-kernel@vger.kernel.org>
Subject: [PATCH v8 3/3] remoteproc: zynqmp: parse TCM from device tree
Date: Fri, 15 Dec 2023 15:57:25 -0800
Message-ID: <20231215235725.1247350-4-tanmay.shah@amd.com>
In-Reply-To: <20231215235725.1247350-1-tanmay.shah@amd.com>

The ZynqMP TCM information was hardcoded in the driver. Now the TCM
information is available in the device tree, so parse it in the driver
as per the new bindings.
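
For reference, a hypothetical device-tree fragment in the shape this
parsing code expects. Exact node names, labels and power-domain
specifiers are defined by patches 1/3 and 2/3 of this series; the
values below are illustrative only:

  r5f@ffe00000 {
	...
	/* two address cells and two size cells per TCM bank */
	reg = <0x0 0xffe00000 0x0 0x10000>,
	      <0x0 0xffe20000 0x0 0x10000>;
	/* entry 0 is the core's own domain; entries 1..n are TCM banks */
	power-domains = <&zynqmp_firmware PD_RPU_0>,
			<&zynqmp_firmware PD_R5_0_ATCM>,
			<&zynqmp_firmware PD_R5_0_BTCM>;
  };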

Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
---

Changes in v8:
  - parse the power-domains property from the device tree and use EEMI
    calls to power the TCM on/off instead of the pm domains framework
    (see the illustrative sketch after the changelog)
  - Remove the pm_domain_id validation check when powering the TCM on/off
  - Remove spurious change

Changes in v7:
  - move checking of pm_domain_id from previous patch
  - fix mem_bank_data memory allocation
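
As a side note for reviewers, here is a rough sketch (not part of this
patch) of how the parsed pm_domain_id values can be consumed through
EEMI calls, per the first v8 change above. The tcm_power_on() and
tcm_power_off() helper names are illustrative only; in the driver the
actual calls sit in the existing prepare/unprepare handlers:

  #include <linux/firmware/xlnx-zynqmp.h>

  static int tcm_power_on(struct zynqmp_r5_core *r5_core, int bank)
  {
	u32 pm_domain_id = r5_core->tcm_banks[bank]->pm_domain_id;

	/* EEMI request: power up the TCM bank and claim access to it */
	return zynqmp_pm_request_node(pm_domain_id,
				      ZYNQMP_PM_CAPABILITY_ACCESS, 0,
				      ZYNQMP_PM_REQUEST_ACK_BLOCKING);
  }

  static void tcm_power_off(struct zynqmp_r5_core *r5_core, int bank)
  {
	/* EEMI release: drop the requirement so the bank may power down */
	zynqmp_pm_release_node(r5_core->tcm_banks[bank]->pm_domain_id);
  }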

 drivers/remoteproc/xlnx_r5_remoteproc.c | 154 +++++++++++++++++++++++-
 1 file changed, 148 insertions(+), 6 deletions(-)

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a64..36d73dcd93f0 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,8 +74,8 @@ struct mbox_info {
 };
 
 /*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. These will stay in the driver to maintain
+ * backward compatibility with device trees that lack TCM information.
  */
 static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
 	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
@@ -878,6 +878,139 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
 	return ERR_PTR(ret);
 }
 
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+	struct of_phandle_args out_args;
+	int tcm_reg_per_r5, tcm_pd_idx;
+	struct zynqmp_r5_core *r5_core;
+	int i, j, tcm_bank_count, ret;
+	struct platform_device *cpdev;
+	struct mem_bank_data *tcm;
+	struct device_node *np;
+	struct resource *res;
+	u64 abs_addr, size;
+	struct device *dev;
+
+	for (i = 0; i < cluster->core_count; i++) {
+		r5_core = cluster->r5_cores[i];
+		dev = r5_core->dev;
+		np = of_node_get(dev_of_node(dev));
+		tcm_pd_idx = 1;
+
+		/* with #address-cells = 2 and #size-cells = 2, each reg entry is 4 u32s */
+		tcm_reg_per_r5 = of_property_count_elems_of_size(np, "reg",
+								 4 * sizeof(u32));
+		if (tcm_reg_per_r5 <= 0) {
+			dev_err(dev, "can't get reg property, err %d\n", tcm_reg_per_r5);
+			return -EINVAL;
+		}
+
+		/*
+		 * In lockstep mode, r5 core 0 uses r5 core 1's TCM power
+		 * domains as well, so allocate twice the per-core TCM count.
+		 */
+		if (cluster->mode == LOCKSTEP_MODE)
+			tcm_bank_count = tcm_reg_per_r5 * 2;
+		else
+			tcm_bank_count = tcm_reg_per_r5;
+
+		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+						  sizeof(struct mem_bank_data *),
+						  GFP_KERNEL);
+		if (!r5_core->tcm_banks)
+			return -ENOMEM;
+
+		r5_core->tcm_bank_count = tcm_bank_count;
+		for (j = 0; j < tcm_bank_count; j++) {
+			tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+					   GFP_KERNEL);
+			if (!tcm)
+				return -ENOMEM;
+
+			r5_core->tcm_banks[j] = tcm;
+
+			/*
+			 * In lockstep mode, get the second core's TCM power
+			 * domain IDs once the first core's TCM parsing is done.
+			 */
+			if (j == tcm_reg_per_r5) {
+				/*
+				 * Get the second core's node; of_get_next_child()
+				 * itself drops the reference to the first core's node.
+				 */
+				np = of_get_next_child(cluster->dev->of_node, np);
+
+				/*
+				 * reset index of power-domains property list
+				 * for second core
+				 */
+				tcm_pd_idx = 1;
+			}
+
+			/* get power-domains id of tcm */
+			ret = of_parse_phandle_with_args(np, "power-domains",
+							 "#power-domain-cells",
+							 tcm_pd_idx,
+							 &out_args);
+			if (ret) {
+				dev_err(r5_core->dev,
+					"failed to get tcm %d pm domain, ret %d\n",
+					j, ret);
+				of_node_put(np);
+				return ret;
+			}
+			tcm->pm_domain_id = out_args.args[0];
+			of_node_put(out_args.np);
+			tcm_pd_idx++;
+
+			/*
+			 * In lockstep mode, we only need the second core's
+			 * power domain IDs; ignore its other information.
+			 * The resulting table matches zynqmp_tcm_banks_lockstep.
+			 */
+			if (j >= tcm_reg_per_r5)
+				continue;
+
+			/* get tcm address without translation */
+			ret = of_property_read_reg(np, j, &abs_addr, &size);
+			if (ret) {
+				of_node_put(np);
+				dev_err(dev, "failed to get reg property\n");
+				return ret;
+			}
+
+			/*
+			 * The remote processor can address only 32 bits,
+			 * so truncate the 64-bit values to 32 bits; this
+			 * discards the unused upper 32 bits.
+			 */
+			tcm->da = (u32)abs_addr;
+			tcm->size = (u32)size;
+
+			cpdev = to_platform_device(dev);
+			res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
+			if (!res) {
+				of_node_put(np);
+				dev_err(dev, "failed to get tcm resource\n");
+				return -EINVAL;
+			}
+
+			tcm->addr = (u32)res->start;
+			tcm->bank_name = (char *)res->name;
+			res = devm_request_mem_region(dev, tcm->addr, tcm->size,
+						      tcm->bank_name);
+			if (!res) {
+				dev_err(dev, "failed to request tcm resource\n");
+				of_node_put(np);
+				return -EINVAL;
+			}
+		}
+	}
+
+	of_node_put(np);
+	return 0;
+}
+
 /**
  * zynqmp_r5_get_tcm_node()
  * Ideally this function should parse tcm node and store information
@@ -956,10 +1089,19 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
 	struct zynqmp_r5_core *r5_core;
 	int ret, i;
 
-	ret = zynqmp_r5_get_tcm_node(cluster);
-	if (ret < 0) {
-		dev_err(dev, "can't get tcm node, err %d\n", ret);
-		return ret;
+	r5_core = cluster->r5_cores[0];
+	if (of_find_property(r5_core->np, "reg", NULL)) {
+		ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
+		if (ret) {
+			dev_err(dev, "can't get tcm node from dt, err %d\n", ret);
+			return ret;
+		}
+	} else {
+		ret = zynqmp_r5_get_tcm_node(cluster);
+		if (ret < 0) {
+			dev_err(dev, "can't get tcm node, err %d\n", ret);
+			return ret;
+		}
 	}
 
 	for (i = 0; i < cluster->core_count; i++) {
-- 
2.25.1

