linux-pm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/4] Add interconnect sync state support
@ 2020-07-09 11:07 Georgi Djakov
  2020-07-09 11:07 ` [PATCH 1/4] interconnect: Add " Georgi Djakov
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Georgi Djakov @ 2020-07-09 11:07 UTC (permalink / raw)
  To: linux-pm
  Cc: saravanak, mdtipton, okukatla, bjorn.andersson, vincent.guittot,
	georgi.djakov, linux-arm-msm, linux-kernel

Bootloaders often leave some system resources enabled such as clocks,
regulators, interconnects etc. We want to keep these resources enabled
until all their consumers are probed. These resources are often shared,
so we must wait for all the consumers to come up, before deciding
whether to turn them off or change the configuration. This patchset is
trying to solve the above problem just for the on-chip interconnects.

The problem is solved by allowing the interconnect providers to specify
an initial bandwidth value, which is enforced during boot as a floor
value, while the requests from all consumers are being collected.
Then the sync_state() callback is used to signal when all consumers have
been probed, meaning that the floor bandwidth is not needed anymore and
the framework is ready to re-aggregate and process all requests.

Georgi Djakov (4):
  interconnect: Add sync state support
  interconnect: Add get_bw() callback
  interconnect: qcom: sdm845: Use icc_sync_state
  interconnect: qcom: osm-l3: Use icc_sync_state

 drivers/interconnect/core.c           | 57 +++++++++++++++++++++++++++
 drivers/interconnect/qcom/osm-l3.c    |  9 +++++
 drivers/interconnect/qcom/sdm845.c    |  9 +++++
 include/linux/interconnect-provider.h |  5 +++
 4 files changed, 80 insertions(+)


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/4] interconnect: Add sync state support
  2020-07-09 11:07 [PATCH 0/4] Add interconnect sync state support Georgi Djakov
@ 2020-07-09 11:07 ` Georgi Djakov
  2020-07-10  5:04   ` Mike Tipton
  2020-07-09 11:07 ` [PATCH 2/4] interconnect: Add get_bw() callback Georgi Djakov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 7+ messages in thread
From: Georgi Djakov @ 2020-07-09 11:07 UTC (permalink / raw)
  To: linux-pm
  Cc: saravanak, mdtipton, okukatla, bjorn.andersson, vincent.guittot,
	georgi.djakov, linux-arm-msm, linux-kernel

The bootloaders often do some initial configuration of the interconnects
in the system and we want to keep this configuration until all consumers
have probed and expressed their bandwidth needs. This is because we don't
want to change the configuration by starting to disable unused paths until
every user has had a chance to request the amount of bandwidth it needs.

To accomplish this we will implement an interconnect specific sync_state
callback which will synchronize (aggregate and set) the current bandwidth
settings when all consumers have been probed.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
 drivers/interconnect/core.c           | 56 +++++++++++++++++++++++++++
 include/linux/interconnect-provider.h |  3 ++
 2 files changed, 59 insertions(+)

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index e5f998744501..e53adfee1ee3 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -26,6 +26,8 @@
 
 static DEFINE_IDR(icc_idr);
 static LIST_HEAD(icc_providers);
+static int providers_count;
+static bool synced_state;
 static DEFINE_MUTEX(icc_lock);
 static struct dentry *icc_debugfs_dir;
 
@@ -255,6 +257,10 @@ static int aggregate_requests(struct icc_node *node)
 			continue;
 		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
 			     &node->avg_bw, &node->peak_bw);
+
+		/* during boot use the initial bandwidth as a floor value */
+		if (!synced_state)
+			node->peak_bw = max(node->peak_bw, node->init_bw);
 	}
 
 	return 0;
@@ -919,6 +925,12 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 	node->provider = provider;
 	list_add_tail(&node->node_list, &provider->nodes);
 
+	/* get the bandwidth value and sync it with the hardware */
+	if (node->init_bw && provider->set) {
+		node->peak_bw = node->init_bw;
+		provider->set(node, node);
+	}
+
 	mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1014,8 +1026,52 @@ int icc_provider_del(struct icc_provider *provider)
 }
 EXPORT_SYMBOL_GPL(icc_provider_del);
 
+static int of_count_icc_providers(struct device_node *np)
+{
+	struct device_node *child;
+	int count = 0;
+
+	for_each_available_child_of_node(np, child) {
+		if (of_property_read_bool(child, "#interconnect-cells"))
+			count++;
+		count += of_count_icc_providers(child);
+	}
+	of_node_put(np);
+
+	return count;
+}
+
+void icc_sync_state(struct device *dev)
+{
+	struct icc_provider *p;
+	struct icc_node *n;
+	static int count;
+
+	count++;
+
+	if (count < providers_count)
+		return;
+
+	mutex_lock(&icc_lock);
+	list_for_each_entry(p, &icc_providers, provider_list) {
+		dev_dbg(p->dev, "interconnect provider is in synced state\n");
+		list_for_each_entry(n, &p->nodes, node_list) {
+			aggregate_requests(n);
+			p->set(n, n);
+		}
+	}
+	mutex_unlock(&icc_lock);
+	synced_state = true;
+}
+EXPORT_SYMBOL_GPL(icc_sync_state);
+
 static int __init icc_init(void)
 {
+	struct device_node *root = of_find_node_by_path("/");
+
+	providers_count = of_count_icc_providers(root);
+	of_node_put(root);
+
 	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
 	debugfs_create_file("interconnect_summary", 0444,
 			    icc_debugfs_dir, NULL, &icc_summary_fops);
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 0c494534b4d3..153fb7616f96 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -71,6 +71,7 @@ struct icc_provider {
  * @req_list: a list of QoS constraint requests associated with this node
  * @avg_bw: aggregated value of average bandwidth requests from all consumers
  * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_bw: the bandwidth value that is read from the hardware during init
  * @data: pointer to private data
  */
 struct icc_node {
@@ -87,6 +88,7 @@ struct icc_node {
 	struct hlist_head	req_list;
 	u32			avg_bw;
 	u32			peak_bw;
+	u32			init_bw;
 	void			*data;
 };
 
@@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node);
 int icc_nodes_remove(struct icc_provider *provider);
 int icc_provider_add(struct icc_provider *provider);
 int icc_provider_del(struct icc_provider *provider);
+void icc_sync_state(struct device *dev);
 
 #else
 

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/4] interconnect: Add get_bw() callback
  2020-07-09 11:07 [PATCH 0/4] Add interconnect sync state support Georgi Djakov
  2020-07-09 11:07 ` [PATCH 1/4] interconnect: Add " Georgi Djakov
@ 2020-07-09 11:07 ` Georgi Djakov
  2020-07-10  5:04   ` Mike Tipton
  2020-07-09 11:07 ` [PATCH 3/4] interconnect: qcom: sdm845: Use icc_sync_state Georgi Djakov
  2020-07-09 11:07 ` [PATCH 4/4] interconnect: qcom: osm-l3: " Georgi Djakov
  3 siblings, 1 reply; 7+ messages in thread
From: Georgi Djakov @ 2020-07-09 11:07 UTC (permalink / raw)
  To: linux-pm
  Cc: saravanak, mdtipton, okukatla, bjorn.andersson, vincent.guittot,
	georgi.djakov, linux-arm-msm, linux-kernel

The interconnect controller hardware may support querying the current
bandwidth settings, so add a callback for providers to implement this
functionality if supported.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
 drivers/interconnect/core.c           | 3 ++-
 include/linux/interconnect-provider.h | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index e53adfee1ee3..edbfe8380e83 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -926,7 +926,8 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 	list_add_tail(&node->node_list, &provider->nodes);
 
 	/* get the bandwidth value and sync it with the hardware */
-	if (node->init_bw && provider->set) {
+	if (provider->get_bw && provider->set) {
+		provider->get_bw(node, &node->init_bw);
 		node->peak_bw = node->init_bw;
 		provider->set(node, node);
 	}
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 153fb7616f96..329eccb19f58 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -38,6 +38,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
  * @aggregate: pointer to device specific aggregate operation function
  * @pre_aggregate: pointer to device specific function that is called
  *		   before the aggregation begins (optional)
+ * @get_bw: pointer to device specific function to get current bandwidth
  * @xlate: provider-specific callback for mapping nodes from phandle arguments
  * @dev: the device this interconnect provider belongs to
  * @users: count of active users
@@ -50,6 +51,7 @@ struct icc_provider {
 	int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
 			 u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
 	void (*pre_aggregate)(struct icc_node *node);
+	int (*get_bw)(struct icc_node *node, u32 *bw);
 	struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
 	struct device		*dev;
 	int			users;

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/4] interconnect: qcom: sdm845: Use icc_sync_state
  2020-07-09 11:07 [PATCH 0/4] Add interconnect sync state support Georgi Djakov
  2020-07-09 11:07 ` [PATCH 1/4] interconnect: Add " Georgi Djakov
  2020-07-09 11:07 ` [PATCH 2/4] interconnect: Add get_bw() callback Georgi Djakov
@ 2020-07-09 11:07 ` Georgi Djakov
  2020-07-09 11:07 ` [PATCH 4/4] interconnect: qcom: osm-l3: " Georgi Djakov
  3 siblings, 0 replies; 7+ messages in thread
From: Georgi Djakov @ 2020-07-09 11:07 UTC (permalink / raw)
  To: linux-pm
  Cc: saravanak, mdtipton, okukatla, bjorn.andersson, vincent.guittot,
	georgi.djakov, linux-arm-msm, linux-kernel

Lowering the bandwidth on the bus might have negative consequences if
it's done before all consumers have had a chance to cast their vote. Let's
return the maximum amount of bandwidth as initial value. This bandwidth
level would be maintained until all consumers have probed.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
 drivers/interconnect/qcom/sdm845.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index f6c7b969520d..bfea6a5321b0 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -437,6 +437,13 @@ static const struct qcom_icc_desc sdm845_system_noc = {
 	.num_bcms = ARRAY_SIZE(system_noc_bcms),
 };
 
+static int qcom_get_bw(struct icc_node *node, u32 *bw)
+{
+	*bw = INT_MAX;
+
+	return 0;
+}
+
 static int qnoc_probe(struct platform_device *pdev)
 {
 	const struct qcom_icc_desc *desc;
@@ -469,6 +476,7 @@ static int qnoc_probe(struct platform_device *pdev)
 	provider->set = qcom_icc_set;
 	provider->pre_aggregate = qcom_icc_pre_aggregate;
 	provider->aggregate = qcom_icc_aggregate;
+	provider->get_bw = qcom_get_bw;
 	provider->xlate = of_icc_xlate_onecell;
 	INIT_LIST_HEAD(&provider->nodes);
 	provider->data = data;
@@ -559,6 +567,7 @@ static struct platform_driver qnoc_driver = {
 	.driver = {
 		.name = "qnoc-sdm845",
 		.of_match_table = qnoc_of_match,
+		.sync_state = icc_sync_state,
 	},
 };
 module_platform_driver(qnoc_driver);

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 4/4] interconnect: qcom: osm-l3: Use icc_sync_state
  2020-07-09 11:07 [PATCH 0/4] Add interconnect sync state support Georgi Djakov
                   ` (2 preceding siblings ...)
  2020-07-09 11:07 ` [PATCH 3/4] interconnect: qcom: sdm845: Use icc_sync_state Georgi Djakov
@ 2020-07-09 11:07 ` Georgi Djakov
  3 siblings, 0 replies; 7+ messages in thread
From: Georgi Djakov @ 2020-07-09 11:07 UTC (permalink / raw)
  To: linux-pm
  Cc: saravanak, mdtipton, okukatla, bjorn.andersson, vincent.guittot,
	georgi.djakov, linux-arm-msm, linux-kernel

Lowering the bandwidth on the bus might have negative consequences if
it's done before all consumers have had a chance to cast their vote. Let's
return the maximum amount of bandwidth as initial value. This bandwidth
level would be maintained until all consumers have probed.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
 drivers/interconnect/qcom/osm-l3.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 96fb9ff5ff2e..532d541b71be 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -137,6 +137,13 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
 	return icc_provider_del(&qp->provider);
 }
 
+static int qcom_osm_l3_get_bw(struct icc_node *node, u32 *bw)
+{
+	*bw = INT_MAX;
+
+	return 0;
+}
+
 static int qcom_osm_l3_probe(struct platform_device *pdev)
 {
 	u32 info, src, lval, i, prev_freq = 0, freq;
@@ -215,6 +222,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
 	provider->dev = &pdev->dev;
 	provider->set = qcom_icc_set;
 	provider->aggregate = icc_std_aggregate;
+	provider->get_bw = qcom_osm_l3_get_bw;
 	provider->xlate = of_icc_xlate_onecell;
 	INIT_LIST_HEAD(&provider->nodes);
 	provider->data = data;
@@ -268,6 +276,7 @@ static struct platform_driver osm_l3_driver = {
 	.driver = {
 		.name = "osm-l3",
 		.of_match_table = osm_l3_of_match,
+		.sync_state = icc_sync_state,
 	},
 };
 module_platform_driver(osm_l3_driver);

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 1/4] interconnect: Add sync state support
  2020-07-09 11:07 ` [PATCH 1/4] interconnect: Add " Georgi Djakov
@ 2020-07-10  5:04   ` Mike Tipton
  0 siblings, 0 replies; 7+ messages in thread
From: Mike Tipton @ 2020-07-10  5:04 UTC (permalink / raw)
  To: Georgi Djakov, linux-pm
  Cc: saravanak, okukatla, bjorn.andersson, vincent.guittot,
	linux-arm-msm, linux-kernel

On 7/9/2020 4:07 AM, Georgi Djakov wrote:
> The bootloaders often do some initial configuration of the interconnects
> in the system and we want to keep this configuration until all consumers
> have probed and expressed their bandwidth needs. This is because we don't
> want to change the configuration by starting to disable unused paths until
> every user had a chance to request the amount of bandwidth it needs.
> 
> To accomplish this we will implement an interconnect specific sync_state
> callback which will synchronize (aggregate and set) the current bandwidth
> settings when all consumers have been probed.
> 
> Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
> ---
>   drivers/interconnect/core.c           | 56 +++++++++++++++++++++++++++
>   include/linux/interconnect-provider.h |  3 ++
>   2 files changed, 59 insertions(+)
> 
> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
> index e5f998744501..e53adfee1ee3 100644
> --- a/drivers/interconnect/core.c
> +++ b/drivers/interconnect/core.c
> @@ -26,6 +26,8 @@
>   
>   static DEFINE_IDR(icc_idr);
>   static LIST_HEAD(icc_providers);
> +static int providers_count;
> +static bool synced_state;
>   static DEFINE_MUTEX(icc_lock);
>   static struct dentry *icc_debugfs_dir;
>   
> @@ -255,6 +257,10 @@ static int aggregate_requests(struct icc_node *node)
>   			continue;
>   		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
>   			     &node->avg_bw, &node->peak_bw);
> +
> +		/* during boot use the initial bandwidth as a floor value */
> +		if (!synced_state)
> +			node->peak_bw = max(node->peak_bw, node->init_bw);
>   	}
>   
>   	return 0;
> @@ -919,6 +925,12 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
>   	node->provider = provider;
>   	list_add_tail(&node->node_list, &provider->nodes);
>   
> +	/* get the bandwidth value and sync it with the hardware */
> +	if (node->init_bw && provider->set) {
> +		node->peak_bw = node->init_bw;
> +		provider->set(node, node);
> +	}
> +

We'll need separate initial values for avg_bw/peak_bw. Some of our BCMs 
only support one or the other. Voting for one it doesn't support is a 
NOP. Additionally, some targets bring multiple subsystems out of reset 
in bootloaders and in those cases we'd need BCM to sum our initial 
avg_bw with the other subsystems.

>   	mutex_unlock(&icc_lock);
>   }
>   EXPORT_SYMBOL_GPL(icc_node_add);
> @@ -1014,8 +1026,52 @@ int icc_provider_del(struct icc_provider *provider)
>   }
>   EXPORT_SYMBOL_GPL(icc_provider_del);
>   
> +static int of_count_icc_providers(struct device_node *np)
> +{
> +	struct device_node *child;
> +	int count = 0;
> +
> +	for_each_available_child_of_node(np, child) {
> +		if (of_property_read_bool(child, "#interconnect-cells"))
> +			count++;
> +		count += of_count_icc_providers(child);
> +	}
> +	of_node_put(np);
> +
> +	return count;
> +}
> +
> +void icc_sync_state(struct device *dev)
> +{
> +	struct icc_provider *p;
> +	struct icc_node *n;
> +	static int count;
> +
> +	count++;
> +
> +	if (count < providers_count)
> +		return;
> +
> +	mutex_lock(&icc_lock);
> +	list_for_each_entry(p, &icc_providers, provider_list) {
> +		dev_dbg(p->dev, "interconnect provider is in synced state\n");
> +		list_for_each_entry(n, &p->nodes, node_list) {
> +			aggregate_requests(n);
> +			p->set(n, n);

We could skip re-aggregating/setting for nodes that don't specify an 
initial BW. That'll save a lot of unnecessary HW voting. In practice, 
we'll only need to specify an initial minimum BW for a small subset of 
nodes (likely only one per-BCM we care about). Technically we only need 
to re-aggregate/set if the current BW vote is limited by init_bw, but 
that optimization is less important than skipping the majority that 
don't have an init_bw.

> +		}
> +	}
> +	mutex_unlock(&icc_lock);
> +	synced_state = true;
> +}
> +EXPORT_SYMBOL_GPL(icc_sync_state);
> +
>   static int __init icc_init(void)
>   {
> +	struct device_node *root = of_find_node_by_path("/");
> +
> +	providers_count = of_count_icc_providers(root);
> +	of_node_put(root);
> +
>   	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
>   	debugfs_create_file("interconnect_summary", 0444,
>   			    icc_debugfs_dir, NULL, &icc_summary_fops);
> diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
> index 0c494534b4d3..153fb7616f96 100644
> --- a/include/linux/interconnect-provider.h
> +++ b/include/linux/interconnect-provider.h
> @@ -71,6 +71,7 @@ struct icc_provider {
>    * @req_list: a list of QoS constraint requests associated with this node
>    * @avg_bw: aggregated value of average bandwidth requests from all consumers
>    * @peak_bw: aggregated value of peak bandwidth requests from all consumers
> + * @init_bw: the bandwidth value that is read from the hardware during init
>    * @data: pointer to private data
>    */
>   struct icc_node {
> @@ -87,6 +88,7 @@ struct icc_node {
>   	struct hlist_head	req_list;
>   	u32			avg_bw;
>   	u32			peak_bw;
> +	u32			init_bw;
>   	void			*data;
>   };
>   
> @@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node);
>   int icc_nodes_remove(struct icc_provider *provider);
>   int icc_provider_add(struct icc_provider *provider);
>   int icc_provider_del(struct icc_provider *provider);
> +void icc_sync_state(struct device *dev);
>   
>   #else
>   
> 

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/4] interconnect: Add get_bw() callback
  2020-07-09 11:07 ` [PATCH 2/4] interconnect: Add get_bw() callback Georgi Djakov
@ 2020-07-10  5:04   ` Mike Tipton
  0 siblings, 0 replies; 7+ messages in thread
From: Mike Tipton @ 2020-07-10  5:04 UTC (permalink / raw)
  To: Georgi Djakov, linux-pm
  Cc: saravanak, okukatla, bjorn.andersson, vincent.guittot,
	linux-arm-msm, linux-kernel

On 7/9/2020 4:07 AM, Georgi Djakov wrote:
> The interconnect controller hardware may support querying the current
> bandwidth settings, so add a callback for providers to implement this
> functionality if supported.
> 
> Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
> ---
>   drivers/interconnect/core.c           | 3 ++-
>   include/linux/interconnect-provider.h | 2 ++
>   2 files changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
> index e53adfee1ee3..edbfe8380e83 100644
> --- a/drivers/interconnect/core.c
> +++ b/drivers/interconnect/core.c
> @@ -926,7 +926,8 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
>   	list_add_tail(&node->node_list, &provider->nodes);
>   
>   	/* get the bandwidth value and sync it with the hardware */
> -	if (node->init_bw && provider->set) {
> +	if (provider->get_bw && provider->set) {
> +		provider->get_bw(node, &node->init_bw);

I'm not sure what benefit this callback provides over the provider 
directly setting init_bw in the structure. Additionally, "get_bw" is a 
more generic callback than just for getting the *initial* BW 
requirement. Currently it's only used that way, but staying true to the 
spirit of the callback would require us to return the current BW at any 
point in time.

We can only detect the current HW vote at BCM-level granularity and a 
single BCM can have many nodes. So since this callback is being used to 
determine init_bw, we'd end up voting way more nodes than necessary. In 
practice we'll only need to enforce minimum initial BW on a small subset 
of them, but I wouldn't want to hardcode that init-specific logic in a 
generic "get_bw" callback.

>   		node->peak_bw = node->init_bw;
>   		provider->set(node, node);
>   	}
> diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
> index 153fb7616f96..329eccb19f58 100644
> --- a/include/linux/interconnect-provider.h
> +++ b/include/linux/interconnect-provider.h
> @@ -38,6 +38,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
>    * @aggregate: pointer to device specific aggregate operation function
>    * @pre_aggregate: pointer to device specific function that is called
>    *		   before the aggregation begins (optional)
> + * @get_bw: pointer to device specific function to get current bandwidth
>    * @xlate: provider-specific callback for mapping nodes from phandle arguments
>    * @dev: the device this interconnect provider belongs to
>    * @users: count of active users
> @@ -50,6 +51,7 @@ struct icc_provider {
>   	int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
>   			 u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
>   	void (*pre_aggregate)(struct icc_node *node);
> +	int (*get_bw)(struct icc_node *node, u32 *bw);
>   	struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
>   	struct device		*dev;
>   	int			users;
> 

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2020-07-10  5:05 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-09 11:07 [PATCH 0/4] Add interconnect sync state support Georgi Djakov
2020-07-09 11:07 ` [PATCH 1/4] interconnect: Add " Georgi Djakov
2020-07-10  5:04   ` Mike Tipton
2020-07-09 11:07 ` [PATCH 2/4] interconnect: Add get_bw() callback Georgi Djakov
2020-07-10  5:04   ` Mike Tipton
2020-07-09 11:07 ` [PATCH 3/4] interconnect: qcom: sdm845: Use icc_sync_state Georgi Djakov
2020-07-09 11:07 ` [PATCH 4/4] interconnect: qcom: osm-l3: " Georgi Djakov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).