remoteproc: q6v5: Add support to vote for rpmh power domains

Message ID 20180629102035.2757-1-rnayak@codeaurora.org (mailing list archive)
State New, archived

Commit Message

Rajendra Nayak June 29, 2018, 10:20 a.m. UTC
With rpmh ARC resources being modelled as power domains with
performance state, add support to proxy vote on these for SDM845.
Add support to vote on multiple of them, now that genpd supports
associating multiple power domains to a device.

Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
---
This patch is dependent on the rpmh powerdomain driver
still under review,
https://lkml.org/lkml/2018/6/27/7

 drivers/remoteproc/qcom_q6v5_pil.c | 77 +++++++++++++++++++++++++++++-
 1 file changed, 75 insertions(+), 2 deletions(-)

Comments

Ulf Hansson Aug. 3, 2018, 10:10 a.m. UTC | #1
On 29 June 2018 at 12:20, Rajendra Nayak <rnayak@codeaurora.org> wrote:
> With rpmh ARC resources being modelled as power domains with
> performance state, add support to proxy vote on these for SDM845.
> Add support to vote on multiple of them, now that genpd supports
> associating multiple power domains to a device.
>
> Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
> ---
> This patch is dependent on the rpmh powerdomain driver
> still under review,
> https://lkml.org/lkml/2018/6/27/7

I assume you intend to re-spin this to use the new
dev_pm_domain_attach_by_name(), once 4.19 rc1 is out, right!?

Anyway, please keep me in the loop.

[...]

> +       if (num_pds > 1) {
> +               for (i = 0; i < num_pds; i++) {
> +                       devs[i] = genpd_dev_pm_attach_by_id(dev, i);

This API is supposed to be called only by the driver core. Please use
dev_pm_domain_attach_by_id|name() instead.
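
Something like this (untested), keeping the rest of the loop as in the patch:

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(devs[i]))
			return PTR_ERR(devs[i]);
		/* device_link_add() as in the patch */
	}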

[...]

Kind regards
Uffe
Rajendra Nayak Aug. 6, 2018, 3:38 a.m. UTC | #2
On 8/3/2018 3:40 PM, Ulf Hansson wrote:
> On 29 June 2018 at 12:20, Rajendra Nayak <rnayak@codeaurora.org> wrote:
>> With rpmh ARC resources being modelled as power domains with
>> performance state, add support to proxy vote on these for SDM845.
>> Add support to vote on multiple of them, now that genpd supports
>> associating multiple power domains to a device.
>>
>> Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
>> ---
>> This patch is dependent on the rpmh powerdomain driver
>> still under review,
>> https://lkml.org/lkml/2018/6/27/7
> 
> I assume you intend to re-spin this to use the new
> dev_pm_domain_attach_by_name(), once 4.19 rc1 is out, right!?
> 
> Anyway, please keep me in the loop.

Yes, I do plan to respin this one with the new API.
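
Roughly something like this with the name-based helper (untested, assumes the
"power-domain-names" property in DT matches the driver's pd_names arrays):

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR(devs[i]))
			return PTR_ERR(devs[i]);
		if (!device_link_add(dev, devs[i], DL_FLAG_STATELESS |
				     DL_FLAG_PM_RUNTIME))
			return -EINVAL;
	}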

> 
> [...]
> 
>> +       if (num_pds > 1) {
>> +               for (i = 0; i < num_pds; i++) {
>> +                       devs[i] = genpd_dev_pm_attach_by_id(dev, i);
> 
> This API is supposed to be called only by the driver core. Please use
> dev_pm_domain_attach_by_id|name() instead.

thanks, will change.

regards,
Rajendra
> 
> [...]
> 
> Kind regards
> Uffe
> 
Bjorn Andersson Aug. 6, 2018, 4:48 p.m. UTC | #3
On Fri 29 Jun 03:20 PDT 2018, Rajendra Nayak wrote:

> With rpmh ARC resources being modelled as power domains with
> performance state, add support to proxy vote on these for SDM845.
> Add support to vote on multiple of them, now that genpd supports
> associating multiple power domains to a device.
> 

Thanks for writing up this patch, Rajendra.

> Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
> ---
> This patch is dependent on the rpmh powerdomain driver
> still under review,
> https://lkml.org/lkml/2018/6/27/7
> 
>  drivers/remoteproc/qcom_q6v5_pil.c | 77 +++++++++++++++++++++++++++++-
>  1 file changed, 75 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
> index 2bf8e7c49f2a..2b5be6d15779 100644
> --- a/drivers/remoteproc/qcom_q6v5_pil.c
> +++ b/drivers/remoteproc/qcom_q6v5_pil.c
> @@ -25,6 +25,8 @@
>  #include <linux/of_address.h>
>  #include <linux/of_device.h>
>  #include <linux/platform_device.h>
> +#include <linux/pm_domain.h>
> +#include <linux/pm_runtime.h>
>  #include <linux/regmap.h>
>  #include <linux/regulator/consumer.h>
>  #include <linux/remoteproc.h>
> @@ -132,6 +134,7 @@ struct rproc_hexagon_res {
>  	char **proxy_clk_names;
>  	char **reset_clk_names;
>  	char **active_clk_names;
> +	char **pd_names;
>  	int version;
>  	bool need_mem_protection;
>  	bool has_alt_reset;
> @@ -161,9 +164,11 @@ struct q6v5 {
>  	struct clk *active_clks[8];
>  	struct clk *reset_clks[4];
>  	struct clk *proxy_clks[4];
> +	struct device *pd_devs[3];
>  	int active_clk_count;
>  	int reset_clk_count;
>  	int proxy_clk_count;
> +	int pd_count;
>  
>  	struct reg_info active_regs[1];
>  	struct reg_info proxy_regs[3];
> @@ -324,6 +329,23 @@ static void q6v5_clk_disable(struct device *dev,
>  		clk_disable_unprepare(clks[i]);
>  }
>  
> +static int q6v5_powerdomain_enable(struct device *dev, struct device **devs,
> +				   int count)
> +{
> +	int i;
> +
> +	if (!count)
> +		return 0;
> +
> +	if (count > 1)
> +		for (i = 0; i < count; i++)
> +			dev_pm_genpd_set_performance_state(devs[i], INT_MAX);
> +	else
> +		dev_pm_genpd_set_performance_state(dev, INT_MAX);

I would prefer if we could just set the performance state during
initialization, but I see that we only aggregate the state during
dev_pm_genpd_set_performance_state().

As such you need to also reduce the votes in the disable path; or we
will just max out any shared corners from the first time we boot this
remoteproc.


For this to work I believe _genpd_power_o{n,ff}() would need to
aggregate the performance state of all enabled consumers, something that
would make the interface more convenient to use.
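
I.e. a disable helper mirroring the enable path, something like this
(untested, name just illustrative):

static void q6v5_powerdomain_disable(struct device *dev, struct device **devs,
				     int count)
{
	int i;

	if (!count)
		return;

	/* Drop the proxy votes taken in the enable path */
	if (count > 1)
		for (i = 0; i < count; i++)
			dev_pm_genpd_set_performance_state(devs[i], 0);
	else
		dev_pm_genpd_set_performance_state(dev, 0);

	pm_runtime_put(dev);
}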

> +
> +	return pm_runtime_get_sync(dev);
> +}
> +
[..]
> @@ -1142,6 +1173,35 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
>  	return i;
>  }
>  
> +static int q6v5_powerdomain_init(struct device *dev, struct device **devs,
> +				 char **pd_names)
> +{
> +	int i = 0, num_pds;
> +
> +	if (!pd_names)
> +		return 0;
> +
> +	while (pd_names[i])
> +		i++;
> +
> +	num_pds = i;
> +
> +	if (num_pds > 1) {
> +		for (i = 0; i < num_pds; i++) {
> +			devs[i] = genpd_dev_pm_attach_by_id(dev, i);

This should be done by_name

> +			if (IS_ERR(devs[i]))
> +				return PTR_ERR(devs[i]);
> +			if (!device_link_add(dev, devs[i], DL_FLAG_STATELESS |
> +					     DL_FLAG_PM_RUNTIME))
> +				return -EINVAL;
> +		}
> +	}
> +
> +	pm_runtime_enable(dev);

Don't you need a call to something like pm_suspend_ignore_children()
here as well, to prevent a pm_runtime_get_sync() in a child device from
powering on our rails at runtime?
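
E.g. (untested):

	pm_suspend_ignore_children(dev, true);
	pm_runtime_enable(dev);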

> +
> +	return num_pds;
> +};
> +
>  static int q6v5_init_reset(struct q6v5 *qproc)
>  {
>  	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,

Regards,
Bjorn
Rajendra Nayak Aug. 8, 2018, 4:20 p.m. UTC | #4
Hi Bjorn,

On 8/6/2018 10:18 PM, Bjorn Andersson wrote:
> On Fri 29 Jun 03:20 PDT 2018, Rajendra Nayak wrote:
> 
>> With rpmh ARC resources being modelled as power domains with
>> performance state, add support to proxy vote on these for SDM845.
>> Add support to vote on multiple of them, now that genpd supports
>> associating multiple power domains to a device.
>>
> 
> Thanks for writing up this patch, Rajendra.
> 
>> Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
>> ---
>> This patch is dependent on the rpmh powerdomain driver
>> still under review,
>> https://lkml.org/lkml/2018/6/27/7
>>
>>   drivers/remoteproc/qcom_q6v5_pil.c | 77 +++++++++++++++++++++++++++++-
>>   1 file changed, 75 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
>> index 2bf8e7c49f2a..2b5be6d15779 100644
>> --- a/drivers/remoteproc/qcom_q6v5_pil.c
>> +++ b/drivers/remoteproc/qcom_q6v5_pil.c
>> @@ -25,6 +25,8 @@
>>   #include <linux/of_address.h>
>>   #include <linux/of_device.h>
>>   #include <linux/platform_device.h>
>> +#include <linux/pm_domain.h>
>> +#include <linux/pm_runtime.h>
>>   #include <linux/regmap.h>
>>   #include <linux/regulator/consumer.h>
>>   #include <linux/remoteproc.h>
>> @@ -132,6 +134,7 @@ struct rproc_hexagon_res {
>>   	char **proxy_clk_names;
>>   	char **reset_clk_names;
>>   	char **active_clk_names;
>> +	char **pd_names;
>>   	int version;
>>   	bool need_mem_protection;
>>   	bool has_alt_reset;
>> @@ -161,9 +164,11 @@ struct q6v5 {
>>   	struct clk *active_clks[8];
>>   	struct clk *reset_clks[4];
>>   	struct clk *proxy_clks[4];
>> +	struct device *pd_devs[3];
>>   	int active_clk_count;
>>   	int reset_clk_count;
>>   	int proxy_clk_count;
>> +	int pd_count;
>>   
>>   	struct reg_info active_regs[1];
>>   	struct reg_info proxy_regs[3];
>> @@ -324,6 +329,23 @@ static void q6v5_clk_disable(struct device *dev,
>>   		clk_disable_unprepare(clks[i]);
>>   }
>>   
>> +static int q6v5_powerdomain_enable(struct device *dev, struct device **devs,
>> +				   int count)
>> +{
>> +	int i;
>> +
>> +	if (!count)
>> +		return 0;
>> +
>> +	if (count > 1)
>> +		for (i = 0; i < count; i++)
>> +			dev_pm_genpd_set_performance_state(devs[i], INT_MAX);
>> +	else
>> +		dev_pm_genpd_set_performance_state(dev, INT_MAX);
> 
> I would prefer if we could just set the performance state during
> initialization, but I see that we only aggregate the state during
> dev_pm_genpd_set_performance_state().
> 
> As such you need to also reduce the votes in the disable path; or we
> will just max out any shared corners from the first time we boot this
> remoteproc.

Right, I need to drop the votes along with doing a runtime suspend of the
device.
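
Something like this in the stop path, before the runtime suspend (untested,
ignoring the single power domain case for brevity):

	for (i = 0; i < qproc->pd_count; i++)
		dev_pm_genpd_set_performance_state(qproc->pd_devs[i], 0);
	pm_runtime_put(qproc->dev);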

> 
> 
> For this to work I believe _genpd_power_o{n,ff}() would need to
> aggregate the performance state of all enabled consumers, something that
> would make the interface more convenient to use.

This isn't done today. There was some discussion in another thread on *whether*
we should do this and what the implications could be [1].

> 
>> +
>> +	return pm_runtime_get_sync(dev);
>> +}
>> +
> [..]
>> @@ -1142,6 +1173,35 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
>>   	return i;
>>   }
>>   
>> +static int q6v5_powerdomain_init(struct device *dev, struct device **devs,
>> +				 char **pd_names)
>> +{
>> +	int i = 0, num_pds;
>> +
>> +	if (!pd_names)
>> +		return 0;
>> +
>> +	while (pd_names[i])
>> +		i++;
>> +
>> +	num_pds = i;
>> +
>> +	if (num_pds > 1) {
>> +		for (i = 0; i < num_pds; i++) {
>> +			devs[i] = genpd_dev_pm_attach_by_id(dev, i);
> 
> This should be done by_name

Right, I posted this before the by_name API was available :)
I will move to it when I respin.

> 
>> +			if (IS_ERR(devs[i]))
>> +				return PTR_ERR(devs[i]);
>> +			if (!device_link_add(dev, devs[i], DL_FLAG_STATELESS |
>> +					     DL_FLAG_PM_RUNTIME))
>> +				return -EINVAL;
>> +		}
>> +	}
>> +
>> +	pm_runtime_enable(dev);
> 
> Don't you need a call to something like pm_suspend_ignore_children()
> here as well, to prevent a pm_runtime_get_sync() in a child device from
> powering on our rails at runtime?

Are there any child nodes of remoteproc which do runtime control of
resources via runtime PM?

Thanks for the review.
regards,
Rajendra

[1] https://lkml.org/lkml/2018/6/15/139
Bjorn Andersson Aug. 8, 2018, 8:02 p.m. UTC | #5
On Wed 08 Aug 09:20 PDT 2018, Rajendra Nayak wrote:
> On 8/6/2018 10:18 PM, Bjorn Andersson wrote:
> > On Fri 29 Jun 03:20 PDT 2018, Rajendra Nayak wrote:
[..]
> > > +static int q6v5_powerdomain_enable(struct device *dev, struct device **devs,
> > > +				   int count)
> > > +{
> > > +	int i;
> > > +
> > > +	if (!count)
> > > +		return 0;
> > > +
> > > +	if (count > 1)
> > > +		for (i = 0; i < count; i++)
> > > +			dev_pm_genpd_set_performance_state(devs[i], INT_MAX);
> > > +	else
> > > +		dev_pm_genpd_set_performance_state(dev, INT_MAX);
> > 
> > I would prefer if we could just set the performance state during
> > initialization, but I see that we only aggregate the state during
> > dev_pm_genpd_set_performance_state().
> > 
> > As such you need to also reduce the votes in the disable path; or we
> > will just max out any shared corners from the first time we boot this
> > remoteproc.
> 
> Right, I need to drop the votes along with doing a runtime suspend of the
> device.
> 
> > 
> > 
> > For this to work I believe _genpd_power_o{n,ff}() would need to
> > aggregate the performance state of all enabled consumers, something that
> > would make the interface more convenient to use.
> 
> This isn't done today. There was some discussion in another thread on *whether*
> we should do this and what the implications could be [1].
> 

Thanks for the pointer. Let's start by explicitly setting the performance
state during both enable and disable, and then we can discuss adding this
logic to the core separately.

[..]
> > > +	pm_runtime_enable(dev);
> > 
> > Don't you need a call to something like pm_suspend_ignore_children()
> > here as well, to prevent a pm_runtime_get_sync() in a child device from
> > powering on our rails at runtime?
> 
> Are there any child nodes of remoteproc which do runtime control of
> resources via runtime PM?
> 

Srinivas does that in the audio drivers.

Regards,
Bjorn

Patch

diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2bf8e7c49f2a..2b5be6d15779 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -25,6 +25,8 @@ 
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/remoteproc.h>
@@ -132,6 +134,7 @@  struct rproc_hexagon_res {
 	char **proxy_clk_names;
 	char **reset_clk_names;
 	char **active_clk_names;
+	char **pd_names;
 	int version;
 	bool need_mem_protection;
 	bool has_alt_reset;
@@ -161,9 +164,11 @@  struct q6v5 {
 	struct clk *active_clks[8];
 	struct clk *reset_clks[4];
 	struct clk *proxy_clks[4];
+	struct device *pd_devs[3];
 	int active_clk_count;
 	int reset_clk_count;
 	int proxy_clk_count;
+	int pd_count;
 
 	struct reg_info active_regs[1];
 	struct reg_info proxy_regs[3];
@@ -324,6 +329,23 @@  static void q6v5_clk_disable(struct device *dev,
 		clk_disable_unprepare(clks[i]);
 }
 
+static int q6v5_powerdomain_enable(struct device *dev, struct device **devs,
+				   int count)
+{
+	int i;
+
+	if (!count)
+		return 0;
+
+	if (count > 1)
+		for (i = 0; i < count; i++)
+			dev_pm_genpd_set_performance_state(devs[i], INT_MAX);
+	else
+		dev_pm_genpd_set_performance_state(dev, INT_MAX);
+
+	return pm_runtime_get_sync(dev);
+}
+
 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
 				   bool remote_owner, phys_addr_t addr,
 				   size_t size)
@@ -802,11 +824,18 @@  static int q6v5_start(struct rproc *rproc)
 
 	enable_irq(qproc->handover_irq);
 
+	ret = q6v5_powerdomain_enable(qproc->dev, qproc->pd_devs,
+				      qproc->pd_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable power domains\n");
+		goto disable_irqs;
+	}
+
 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
 				    qproc->proxy_reg_count);
 	if (ret) {
 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
-		goto disable_irqs;
+		goto disable_powerdomains;
 	}
 
 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
@@ -931,7 +960,8 @@  static int q6v5_start(struct rproc *rproc)
 disable_proxy_reg:
 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
 			       qproc->proxy_reg_count);
-
+disable_powerdomains:
+	pm_runtime_put(qproc->dev);
 disable_irqs:
 	disable_irq(qproc->handover_irq);
 
@@ -991,6 +1021,7 @@  static int q6v5_stop(struct rproc *rproc)
 			 qproc->active_clk_count);
 	q6v5_regulator_disable(qproc, qproc->active_regs,
 			       qproc->active_reg_count);
+	pm_runtime_put(qproc->dev);
 
 	return 0;
 }
@@ -1142,6 +1173,35 @@  static int q6v5_init_clocks(struct device *dev, struct clk **clks,
 	return i;
 }
 
+static int q6v5_powerdomain_init(struct device *dev, struct device **devs,
+				 char **pd_names)
+{
+	int i = 0, num_pds;
+
+	if (!pd_names)
+		return 0;
+
+	while (pd_names[i])
+		i++;
+
+	num_pds = i;
+
+	if (num_pds > 1) {
+		for (i = 0; i < num_pds; i++) {
+			devs[i] = genpd_dev_pm_attach_by_id(dev, i);
+			if (IS_ERR(devs[i]))
+				return PTR_ERR(devs[i]);
+			if (!device_link_add(dev, devs[i], DL_FLAG_STATELESS |
+					     DL_FLAG_PM_RUNTIME))
+				return -EINVAL;
+		}
+	}
+
+	pm_runtime_enable(dev);
+
+	return num_pds;
+};
+
 static int q6v5_init_reset(struct q6v5 *qproc)
 {
 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
@@ -1298,6 +1358,13 @@  static int q6v5_probe(struct platform_device *pdev)
 	}
 	qproc->active_reg_count = ret;
 
+	ret = q6v5_powerdomain_init(&pdev->dev, qproc->pd_devs, desc->pd_names);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to init power domains\n");
+		goto free_rproc;
+	}
+	qproc->pd_count = ret;
+
 	ret = q6v5_init_reset(qproc);
 	if (ret)
 		goto free_rproc;
@@ -1386,6 +1453,12 @@  static const struct rproc_hexagon_res sdm845_mss = {
 			"mnoc_axi",
 			NULL
 	},
+	.pd_names = (char*[]){
+			"cx",
+			"mx",
+			"mss",
+			NULL
+	},
 	.need_mem_protection = true,
 	.has_alt_reset = true,
 	.version = MSS_SDM845,