[1/4] interconnect: Add sync state support

Message ID: 20200709110705.30359-2-georgi.djakov@linaro.org (mailing list archive)
State: Superseded
Series: Add interconnect sync state support

Commit Message

Georgi Djakov July 9, 2020, 11:07 a.m. UTC
Bootloaders often do some initial configuration of the interconnects in
the system, and we want to keep this configuration until all consumers
have probed and expressed their bandwidth needs. This is because we don't
want to change the configuration by disabling unused paths until every
user has had a chance to request the amount of bandwidth it needs.

To accomplish this, implement an interconnect-specific sync_state
callback which will synchronize (aggregate and set) the current bandwidth
settings once all consumers have been probed.

Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
 drivers/interconnect/core.c           | 56 +++++++++++++++++++++++++++
 include/linux/interconnect-provider.h |  3 ++
 2 files changed, 59 insertions(+)
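
A provider driver opts into this by pointing its driver's sync_state hook
at icc_sync_state(). A minimal sketch, assuming a hypothetical example
driver (the example_* names and the "example,icc" compatible string are
made up for illustration):

#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_icc_probe(struct platform_device *pdev)
{
	/* register the icc_provider and its icc_nodes here */
	return 0;
}

static const struct of_device_id example_icc_of_match[] = {
	{ .compatible = "example,icc" },
	{ }
};

static struct platform_driver example_icc_driver = {
	.probe = example_icc_probe,
	.driver = {
		.name = "example-icc",
		.of_match_table = example_icc_of_match,
		/* called by the driver core once all consumers have probed */
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(example_icc_driver);
MODULE_LICENSE("GPL");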

Comments

Mike Tipton July 10, 2020, 5:04 a.m. UTC | #1
On 7/9/2020 4:07 AM, Georgi Djakov wrote:
> Bootloaders often do some initial configuration of the interconnects in
> the system, and we want to keep this configuration until all consumers
> have probed and expressed their bandwidth needs. This is because we don't
> want to change the configuration by disabling unused paths until every
> user has had a chance to request the amount of bandwidth it needs.
> 
> To accomplish this, implement an interconnect-specific sync_state
> callback which will synchronize (aggregate and set) the current bandwidth
> settings once all consumers have been probed.
> 
> Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
> ---
>   drivers/interconnect/core.c           | 56 +++++++++++++++++++++++++++
>   include/linux/interconnect-provider.h |  3 ++
>   2 files changed, 59 insertions(+)
> 
> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
> index e5f998744501..e53adfee1ee3 100644
> --- a/drivers/interconnect/core.c
> +++ b/drivers/interconnect/core.c
> @@ -26,6 +26,8 @@
>   
>   static DEFINE_IDR(icc_idr);
>   static LIST_HEAD(icc_providers);
> +static int providers_count;
> +static bool synced_state;
>   static DEFINE_MUTEX(icc_lock);
>   static struct dentry *icc_debugfs_dir;
>   
> @@ -255,6 +257,10 @@ static int aggregate_requests(struct icc_node *node)
>   			continue;
>   		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
>   			     &node->avg_bw, &node->peak_bw);
> +
> +		/* during boot use the initial bandwidth as a floor value */
> +		if (!synced_state)
> +			node->peak_bw = max(node->peak_bw, node->init_bw);
>   	}
>   
>   	return 0;
> @@ -919,6 +925,12 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
>   	node->provider = provider;
>   	list_add_tail(&node->node_list, &provider->nodes);
>   
> +	/* get the bandwidth value and sync it with the hardware */
> +	if (node->init_bw && provider->set) {
> +		node->peak_bw = node->init_bw;
> +		provider->set(node, node);
> +	}
> +

We'll need separate initial values for avg_bw/peak_bw. Some of our BCMs 
only support one or the other, and voting for one a BCM doesn't support 
is a no-op. Additionally, some targets bring multiple subsystems out of 
reset in the bootloader, and in those cases we'd need the BCM to sum our 
initial avg_bw with those of the other subsystems.
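
Concretely, the suggestion amounts to something like the following sketch
(the init_avg/init_peak fields are illustrative only and not part of this
patch):

struct icc_node {
	...
	u32			init_avg;	/* initial average bandwidth */
	u32			init_peak;	/* initial peak bandwidth */
	...
};

/* and in aggregate_requests(), floor both aggregates until sync_state: */
if (!synced_state) {
	node->avg_bw = max(node->avg_bw, node->init_avg);
	node->peak_bw = max(node->peak_bw, node->init_peak);
}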

>   	mutex_unlock(&icc_lock);
>   }
>   EXPORT_SYMBOL_GPL(icc_node_add);
> @@ -1014,8 +1026,52 @@ int icc_provider_del(struct icc_provider *provider)
>   }
>   EXPORT_SYMBOL_GPL(icc_provider_del);
>   
> +static int of_count_icc_providers(struct device_node *np)
> +{
> +	struct device_node *child;
> +	int count = 0;
> +
> +	for_each_available_child_of_node(np, child) {
> +		if (of_property_read_bool(child, "#interconnect-cells"))
> +			count++;
> +		count += of_count_icc_providers(child);
> +	}
> +	of_node_put(np);
> +
> +	return count;
> +}
> +
> +void icc_sync_state(struct device *dev)
> +{
> +	struct icc_provider *p;
> +	struct icc_node *n;
> +	static int count;
> +
> +	count++;
> +
> +	if (count < providers_count)
> +		return;
> +
> +	mutex_lock(&icc_lock);
> +	list_for_each_entry(p, &icc_providers, provider_list) {
> +		dev_dbg(p->dev, "interconnect provider is in synced state\n");
> +		list_for_each_entry(n, &p->nodes, node_list) {
> +			aggregate_requests(n);
> +			p->set(n, n);

We could skip re-aggregating/setting for nodes that don't specify an 
initial BW. That would save a lot of unnecessary HW voting. In practice, 
we'll only need to specify an initial minimum BW for a small subset of 
nodes (likely only one per BCM we care about). Technically we only need 
to re-aggregate/set if the current BW vote is limited by init_bw, but 
that optimization is less important than skipping the majority of nodes 
that don't have an init_bw.
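
In code, the skip described above could look roughly like this inside the
icc_sync_state() loop (a sketch against this patch, not a tested change):

list_for_each_entry(n, &p->nodes, node_list) {
	/* nodes without an initial floor have nothing to re-vote */
	if (!n->init_bw)
		continue;
	aggregate_requests(n);
	p->set(n, n);
}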

> +		}
> +	}
> +	mutex_unlock(&icc_lock);
> +	synced_state = true;
> +}
> +EXPORT_SYMBOL_GPL(icc_sync_state);
> +
>   static int __init icc_init(void)
>   {
> +	struct device_node *root = of_find_node_by_path("/");
> +
> +	providers_count = of_count_icc_providers(root);
> +	of_node_put(root);
> +
>   	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
>   	debugfs_create_file("interconnect_summary", 0444,
>   			    icc_debugfs_dir, NULL, &icc_summary_fops);
> diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
> index 0c494534b4d3..153fb7616f96 100644
> --- a/include/linux/interconnect-provider.h
> +++ b/include/linux/interconnect-provider.h
> @@ -71,6 +71,7 @@ struct icc_provider {
>    * @req_list: a list of QoS constraint requests associated with this node
>    * @avg_bw: aggregated value of average bandwidth requests from all consumers
>    * @peak_bw: aggregated value of peak bandwidth requests from all consumers
> + * @init_bw: the bandwidth value that is read from the hardware during init
>    * @data: pointer to private data
>    */
>   struct icc_node {
> @@ -87,6 +88,7 @@ struct icc_node {
>   	struct hlist_head	req_list;
>   	u32			avg_bw;
>   	u32			peak_bw;
> +	u32			init_bw;
>   	void			*data;
>   };
>   
> @@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node);
>   int icc_nodes_remove(struct icc_provider *provider);
>   int icc_provider_add(struct icc_provider *provider);
>   int icc_provider_del(struct icc_provider *provider);
> +void icc_sync_state(struct device *dev);
>   
>   #else
>   
>
Patch

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index e5f998744501..e53adfee1ee3 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -26,6 +26,8 @@
 
 static DEFINE_IDR(icc_idr);
 static LIST_HEAD(icc_providers);
+static int providers_count;
+static bool synced_state;
 static DEFINE_MUTEX(icc_lock);
 static struct dentry *icc_debugfs_dir;
 
@@ -255,6 +257,10 @@ static int aggregate_requests(struct icc_node *node)
 			continue;
 		p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
 			     &node->avg_bw, &node->peak_bw);
+
+		/* during boot use the initial bandwidth as a floor value */
+		if (!synced_state)
+			node->peak_bw = max(node->peak_bw, node->init_bw);
 	}
 
 	return 0;
@@ -919,6 +925,12 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 	node->provider = provider;
 	list_add_tail(&node->node_list, &provider->nodes);
 
+	/* get the bandwidth value and sync it with the hardware */
+	if (node->init_bw && provider->set) {
+		node->peak_bw = node->init_bw;
+		provider->set(node, node);
+	}
+
 	mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1014,8 +1026,52 @@ int icc_provider_del(struct icc_provider *provider)
 }
 EXPORT_SYMBOL_GPL(icc_provider_del);
 
+static int of_count_icc_providers(struct device_node *np)
+{
+	struct device_node *child;
+	int count = 0;
+
+	for_each_available_child_of_node(np, child) {
+		if (of_property_read_bool(child, "#interconnect-cells"))
+			count++;
+		count += of_count_icc_providers(child);
+	}
+	of_node_put(np);
+
+	return count;
+}
+
+void icc_sync_state(struct device *dev)
+{
+	struct icc_provider *p;
+	struct icc_node *n;
+	static int count;
+
+	count++;
+
+	if (count < providers_count)
+		return;
+
+	mutex_lock(&icc_lock);
+	list_for_each_entry(p, &icc_providers, provider_list) {
+		dev_dbg(p->dev, "interconnect provider is in synced state\n");
+		list_for_each_entry(n, &p->nodes, node_list) {
+			aggregate_requests(n);
+			p->set(n, n);
+		}
+	}
+	mutex_unlock(&icc_lock);
+	synced_state = true;
+}
+EXPORT_SYMBOL_GPL(icc_sync_state);
+
 static int __init icc_init(void)
 {
+	struct device_node *root = of_find_node_by_path("/");
+
+	providers_count = of_count_icc_providers(root);
+	of_node_put(root);
+
 	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
 	debugfs_create_file("interconnect_summary", 0444,
 			    icc_debugfs_dir, NULL, &icc_summary_fops);
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 0c494534b4d3..153fb7616f96 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -71,6 +71,7 @@ struct icc_provider {
  * @req_list: a list of QoS constraint requests associated with this node
  * @avg_bw: aggregated value of average bandwidth requests from all consumers
  * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_bw: the bandwidth value that is read from the hardware during init
  * @data: pointer to private data
  */
 struct icc_node {
@@ -87,6 +88,7 @@ struct icc_node {
 	struct hlist_head	req_list;
 	u32			avg_bw;
 	u32			peak_bw;
+	u32			init_bw;
 	void			*data;
 };
 
@@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node);
 int icc_nodes_remove(struct icc_provider *provider);
 int icc_provider_add(struct icc_provider *provider);
 int icc_provider_del(struct icc_provider *provider);
+void icc_sync_state(struct device *dev);
 
 #else
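
For context, the consumer votes that sync_state waits for use the regular
interconnect API, along these lines (a sketch; the "memory" path name and
the example_* function are hypothetical):

#include <linux/interconnect.h>

static int example_consumer_probe(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* request 100 MB/s average and 200 MB/s peak bandwidth */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		icc_put(path);

	return ret;
}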