Message ID | 1602176947-17385-2-git-send-email-akhilpo@codeaurora.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [1/2] arm64: dts: qcom: sc7180: Add gpu cooling support | expand |
Hi Akhil, On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: > Register GPU as a devfreq cooling device so that it can be passively > cooled by the thermal framework. > > Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> > --- > drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- > drivers/gpu/drm/msm/msm_gpu.h | 2 ++ > 2 files changed, 14 insertions(+), 1 deletion(-) > > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c > index 55d1648..93ffd66 100644 > --- a/drivers/gpu/drm/msm/msm_gpu.c > +++ b/drivers/gpu/drm/msm/msm_gpu.c > @@ -14,6 +14,7 @@ > #include <generated/utsrelease.h> > #include <linux/string_helpers.h> > #include <linux/devfreq.h> > +#include <linux/devfreq_cooling.h> > #include <linux/devcoredump.h> > #include <linux/sched/task.h> > > @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) > if (IS_ERR(gpu->devfreq.devfreq)) { > DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); > gpu->devfreq.devfreq = NULL; > + return; > } > > devfreq_suspend_device(gpu->devfreq.devfreq); > + > + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, > + gpu->devfreq.devfreq); > + if (IS_ERR(gpu->cooling)) { > + DRM_DEV_ERROR(&gpu->pdev->dev, > + "Couldn't register GPU cooling device\n"); > + gpu->cooling = NULL; > + } > } > > static int enable_pwrrail(struct msm_gpu *gpu) > @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, > > msm_devfreq_init(gpu); > > - > gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); > > if (gpu->aspace == NULL) > @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) > gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); > msm_gem_address_space_put(gpu->aspace); > } > + > + devfreq_cooling_unregister(gpu->cooling); Resources should be released in reverse order, otherwise the cooling device could use resources that have already been freed.
On 10/10/2020 12:06 AM, mka@chromium.org wrote: > Hi Akhil, > > On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: >> Register GPU as a devfreq cooling device so that it can be passively >> cooled by the thermal framework. >> >> Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> >> --- >> drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- >> drivers/gpu/drm/msm/msm_gpu.h | 2 ++ >> 2 files changed, 14 insertions(+), 1 deletion(-) >> >> diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c >> index 55d1648..93ffd66 100644 >> --- a/drivers/gpu/drm/msm/msm_gpu.c >> +++ b/drivers/gpu/drm/msm/msm_gpu.c >> @@ -14,6 +14,7 @@ >> #include <generated/utsrelease.h> >> #include <linux/string_helpers.h> >> #include <linux/devfreq.h> >> +#include <linux/devfreq_cooling.h> >> #include <linux/devcoredump.h> >> #include <linux/sched/task.h> >> >> @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) >> if (IS_ERR(gpu->devfreq.devfreq)) { >> DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); >> gpu->devfreq.devfreq = NULL; >> + return; >> } >> >> devfreq_suspend_device(gpu->devfreq.devfreq); >> + >> + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, >> + gpu->devfreq.devfreq); >> + if (IS_ERR(gpu->cooling)) { >> + DRM_DEV_ERROR(&gpu->pdev->dev, >> + "Couldn't register GPU cooling device\n"); >> + gpu->cooling = NULL; >> + } >> } >> >> static int enable_pwrrail(struct msm_gpu *gpu) >> @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, >> >> msm_devfreq_init(gpu); >> >> - >> gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); >> >> if (gpu->aspace == NULL) >> @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) >> gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); >> msm_gem_address_space_put(gpu->aspace); >> } >> + >> + devfreq_cooling_unregister(gpu->cooling); > > Resources should be released in reverse order, otherwise the cooling device 
> could use resources that have already been freed. > Why do you think this is not the correct order? If you are thinking about devfreq struct, it is a managed device resource. -Akhil
On Mon, Oct 12, 2020 at 07:03:51PM +0530, Akhil P Oommen wrote: > On 10/10/2020 12:06 AM, mka@chromium.org wrote: > > Hi Akhil, > > > > On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: > > > Register GPU as a devfreq cooling device so that it can be passively > > > cooled by the thermal framework. > > > > > > Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> > > > --- > > > drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- > > > drivers/gpu/drm/msm/msm_gpu.h | 2 ++ > > > 2 files changed, 14 insertions(+), 1 deletion(-) > > > > > > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c > > > index 55d1648..93ffd66 100644 > > > --- a/drivers/gpu/drm/msm/msm_gpu.c > > > +++ b/drivers/gpu/drm/msm/msm_gpu.c > > > @@ -14,6 +14,7 @@ > > > #include <generated/utsrelease.h> > > > #include <linux/string_helpers.h> > > > #include <linux/devfreq.h> > > > +#include <linux/devfreq_cooling.h> > > > #include <linux/devcoredump.h> > > > #include <linux/sched/task.h> > > > @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) > > > if (IS_ERR(gpu->devfreq.devfreq)) { > > > DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); > > > gpu->devfreq.devfreq = NULL; > > > + return; > > > } > > > devfreq_suspend_device(gpu->devfreq.devfreq); > > > + > > > + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, > > > + gpu->devfreq.devfreq); > > > + if (IS_ERR(gpu->cooling)) { > > > + DRM_DEV_ERROR(&gpu->pdev->dev, > > > + "Couldn't register GPU cooling device\n"); > > > + gpu->cooling = NULL; > > > + } > > > } > > > static int enable_pwrrail(struct msm_gpu *gpu) > > > @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, > > > msm_devfreq_init(gpu); > > > - > > > gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); > > > if (gpu->aspace == NULL) > > > @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) > > > 
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); > > > msm_gem_address_space_put(gpu->aspace); > > > } > > > + > > > + devfreq_cooling_unregister(gpu->cooling); > > > > Resources should be released in reverse order, otherwise the cooling device > > could use resources that have already been freed. > > Why do you think this is not the correct order? If you are thinking > about devfreq struct, it is managed device resource. I did not check specifically if changing the frequency really uses any of the resources that are released previously. In any case it's not a good idea to allow other parts of the kernel to use a half initialized/torn down device. Even if it isn't a problem today someone could change the driver to use any of these resources (or add a new one) in a frequency change, without even thinking about the cooling device, just (rightfully) assuming that things are set up and torn down in a sane order.
On 10/12/2020 11:10 PM, mka@chromium.org wrote: > On Mon, Oct 12, 2020 at 07:03:51PM +0530, Akhil P Oommen wrote: >> On 10/10/2020 12:06 AM, mka@chromium.org wrote: >>> Hi Akhil, >>> >>> On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: >>>> Register GPU as a devfreq cooling device so that it can be passively >>>> cooled by the thermal framework. >>>> >>>> Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> >>>> --- >>>> drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- >>>> drivers/gpu/drm/msm/msm_gpu.h | 2 ++ >>>> 2 files changed, 14 insertions(+), 1 deletion(-) >>>> >>>> diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c >>>> index 55d1648..93ffd66 100644 >>>> --- a/drivers/gpu/drm/msm/msm_gpu.c >>>> +++ b/drivers/gpu/drm/msm/msm_gpu.c >>>> @@ -14,6 +14,7 @@ >>>> #include <generated/utsrelease.h> >>>> #include <linux/string_helpers.h> >>>> #include <linux/devfreq.h> >>>> +#include <linux/devfreq_cooling.h> >>>> #include <linux/devcoredump.h> >>>> #include <linux/sched/task.h> >>>> @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) >>>> if (IS_ERR(gpu->devfreq.devfreq)) { >>>> DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); >>>> gpu->devfreq.devfreq = NULL; >>>> + return; >>>> } >>>> devfreq_suspend_device(gpu->devfreq.devfreq); >>>> + >>>> + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, >>>> + gpu->devfreq.devfreq); >>>> + if (IS_ERR(gpu->cooling)) { >>>> + DRM_DEV_ERROR(&gpu->pdev->dev, >>>> + "Couldn't register GPU cooling device\n"); >>>> + gpu->cooling = NULL; >>>> + } >>>> } >>>> static int enable_pwrrail(struct msm_gpu *gpu) >>>> @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, >>>> msm_devfreq_init(gpu); >>>> - Will remove this unintended change. 
>>>> gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); >>>> if (gpu->aspace == NULL) >>>> @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) >>>> gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); >>>> msm_gem_address_space_put(gpu->aspace); >>>> } >>>> + >>>> + devfreq_cooling_unregister(gpu->cooling); >>> >>> Resources should be released in reverse order, otherwise the cooling device >>> could use resources that have already been freed. >>> Why do you think this is not the correct order? If you are thinking >> about devfreq struct, it is managed device resource. > > I did not check specifically if changing the frequency really uses any of the > resources that are released previously, In any case it's not a good idea to > allow other parts of the kernel to use a half initialized/torn down device. > Even if it isn't a problem today someone could change the driver to use any > of these resources (or add a new one) in a frequency change, without even > thinking about the cooling device, just (rightfully) asuming that things are > set up and torn down in a sane order. 'sane order' relative to what specifically here? Should we worry about freq change at this point because we have already disabled gpu runtime pm and devfreq? -Akhil. > _______________________________________________ > dri-devel mailing list > dri-devel@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/dri-devel > -Akhil.
On Tue, Oct 13, 2020 at 07:23:34PM +0530, Akhil P Oommen wrote: > On 10/12/2020 11:10 PM, mka@chromium.org wrote: > > On Mon, Oct 12, 2020 at 07:03:51PM +0530, Akhil P Oommen wrote: > > > On 10/10/2020 12:06 AM, mka@chromium.org wrote: > > > > Hi Akhil, > > > > > > > > On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: > > > > > Register GPU as a devfreq cooling device so that it can be passively > > > > > cooled by the thermal framework. > > > > > > > > > > Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> > > > > > --- > > > > > drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- > > > > > drivers/gpu/drm/msm/msm_gpu.h | 2 ++ > > > > > 2 files changed, 14 insertions(+), 1 deletion(-) > > > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c > > > > > index 55d1648..93ffd66 100644 > > > > > --- a/drivers/gpu/drm/msm/msm_gpu.c > > > > > +++ b/drivers/gpu/drm/msm/msm_gpu.c > > > > > @@ -14,6 +14,7 @@ > > > > > #include <generated/utsrelease.h> > > > > > #include <linux/string_helpers.h> > > > > > #include <linux/devfreq.h> > > > > > +#include <linux/devfreq_cooling.h> > > > > > #include <linux/devcoredump.h> > > > > > #include <linux/sched/task.h> > > > > > @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) > > > > > if (IS_ERR(gpu->devfreq.devfreq)) { > > > > > DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); > > > > > gpu->devfreq.devfreq = NULL; > > > > > + return; > > > > > } > > > > > devfreq_suspend_device(gpu->devfreq.devfreq); > > > > > + > > > > > + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, > > > > > + gpu->devfreq.devfreq); > > > > > + if (IS_ERR(gpu->cooling)) { > > > > > + DRM_DEV_ERROR(&gpu->pdev->dev, > > > > > + "Couldn't register GPU cooling device\n"); > > > > > + gpu->cooling = NULL; > > > > > + } > > > > > } > > > > > static int enable_pwrrail(struct msm_gpu *gpu) > > > > > @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device 
*drm, struct platform_device *pdev, > > > > > msm_devfreq_init(gpu); > > > > > - > Will remove this unintended change. > > > > > gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); > > > > > if (gpu->aspace == NULL) > > > > > @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) > > > > > gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); > > > > > msm_gem_address_space_put(gpu->aspace); > > > > > } > > > > > + > > > > > + devfreq_cooling_unregister(gpu->cooling); > > > > > > > > Resources should be released in reverse order, otherwise the cooling device > > > > could use resources that have already been freed. > > > > Why do you think this is not the correct order? If you are thinking > > > about devfreq struct, it is managed device resource. > > > > I did not check specifically if changing the frequency really uses any of the > > resources that are released previously, In any case it's not a good idea to > > allow other parts of the kernel to use a half initialized/torn down device. > > Even if it isn't a problem today someone could change the driver to use any > > of these resources (or add a new one) in a frequency change, without even > > thinking about the cooling device, just (rightfully) asuming that things are > > set up and torn down in a sane order. > 'sane order' relative to what specifically here? Should we worry about freq > change at this point because we have already disabled gpu runtime pm and > devfreq? GPU runtime PM and the devfreq being disabled is not evident from the context of the function. You are probably right that it's not a problem in practice, but why give reason for doubts in the first place if this could be avoided by following a common practice?
On 10/13/2020 11:10 PM, mka@chromium.org wrote: > On Tue, Oct 13, 2020 at 07:23:34PM +0530, Akhil P Oommen wrote: >> On 10/12/2020 11:10 PM, mka@chromium.org wrote: >>> On Mon, Oct 12, 2020 at 07:03:51PM +0530, Akhil P Oommen wrote: >>>> On 10/10/2020 12:06 AM, mka@chromium.org wrote: >>>>> Hi Akhil, >>>>> >>>>> On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: >>>>>> Register GPU as a devfreq cooling device so that it can be passively >>>>>> cooled by the thermal framework. >>>>>> >>>>>> Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> >>>>>> --- >>>>>> drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- >>>>>> drivers/gpu/drm/msm/msm_gpu.h | 2 ++ >>>>>> 2 files changed, 14 insertions(+), 1 deletion(-) >>>>>> >>>>>> diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c >>>>>> index 55d1648..93ffd66 100644 >>>>>> --- a/drivers/gpu/drm/msm/msm_gpu.c >>>>>> +++ b/drivers/gpu/drm/msm/msm_gpu.c >>>>>> @@ -14,6 +14,7 @@ >>>>>> #include <generated/utsrelease.h> >>>>>> #include <linux/string_helpers.h> >>>>>> #include <linux/devfreq.h> >>>>>> +#include <linux/devfreq_cooling.h> >>>>>> #include <linux/devcoredump.h> >>>>>> #include <linux/sched/task.h> >>>>>> @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) >>>>>> if (IS_ERR(gpu->devfreq.devfreq)) { >>>>>> DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); >>>>>> gpu->devfreq.devfreq = NULL; >>>>>> + return; >>>>>> } >>>>>> devfreq_suspend_device(gpu->devfreq.devfreq); >>>>>> + >>>>>> + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, >>>>>> + gpu->devfreq.devfreq); >>>>>> + if (IS_ERR(gpu->cooling)) { >>>>>> + DRM_DEV_ERROR(&gpu->pdev->dev, >>>>>> + "Couldn't register GPU cooling device\n"); >>>>>> + gpu->cooling = NULL; >>>>>> + } >>>>>> } >>>>>> static int enable_pwrrail(struct msm_gpu *gpu) >>>>>> @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, >>>>>> msm_devfreq_init(gpu); >>>>>> - 
>> Will remove this unintended change. >>>>>> gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); >>>>>> if (gpu->aspace == NULL) >>>>>> @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) >>>>>> gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); >>>>>> msm_gem_address_space_put(gpu->aspace); >>>>>> } >>>>>> + >>>>>> + devfreq_cooling_unregister(gpu->cooling); >>>>> >>>>> Resources should be released in reverse order, otherwise the cooling device >>>>> could use resources that have already been freed. >>>>> Why do you think this is not the correct order? If you are thinking >>>> about devfreq struct, it is managed device resource. >>> >>> I did not check specifically if changing the frequency really uses any of the >>> resources that are released previously, In any case it's not a good idea to >>> allow other parts of the kernel to use a half initialized/torn down device. >>> Even if it isn't a problem today someone could change the driver to use any >>> of these resources (or add a new one) in a frequency change, without even >>> thinking about the cooling device, just (rightfully) asuming that things are >>> set up and torn down in a sane order. >> 'sane order' relative to what specifically here? Should we worry about freq >> change at this point because we have already disabled gpu runtime pm and >> devfreq? > > GPU runtime PM and the devfreq being disabled is not evident from the context > of the function. You are probably right that it's not a problem in practice, > but why give reason for doubts in the first place if this could be avoided > by following a common practice? > _______________________________________________ > dri-devel mailing list > dri-devel@lists.freedesktop.org > https://lists.freedesktop.org/mailman/listinfo/dri-devel > Other option I see is to create a managed device resource (devm) version of the devfreq_cooling_register API and use that. Is that what you are trying to suggest? -Akhil.
On Wed, Oct 14, 2020 at 12:51:55AM +0530, Akhil P Oommen wrote: > On 10/13/2020 11:10 PM, mka@chromium.org wrote: > > On Tue, Oct 13, 2020 at 07:23:34PM +0530, Akhil P Oommen wrote: > > > On 10/12/2020 11:10 PM, mka@chromium.org wrote: > > > > On Mon, Oct 12, 2020 at 07:03:51PM +0530, Akhil P Oommen wrote: > > > > > On 10/10/2020 12:06 AM, mka@chromium.org wrote: > > > > > > Hi Akhil, > > > > > > > > > > > > On Thu, Oct 08, 2020 at 10:39:07PM +0530, Akhil P Oommen wrote: > > > > > > > Register GPU as a devfreq cooling device so that it can be passively > > > > > > > cooled by the thermal framework. > > > > > > > > > > > > > > Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> > > > > > > > --- > > > > > > > drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- > > > > > > > drivers/gpu/drm/msm/msm_gpu.h | 2 ++ > > > > > > > 2 files changed, 14 insertions(+), 1 deletion(-) > > > > > > > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c > > > > > > > index 55d1648..93ffd66 100644 > > > > > > > --- a/drivers/gpu/drm/msm/msm_gpu.c > > > > > > > +++ b/drivers/gpu/drm/msm/msm_gpu.c > > > > > > > @@ -14,6 +14,7 @@ > > > > > > > #include <generated/utsrelease.h> > > > > > > > #include <linux/string_helpers.h> > > > > > > > #include <linux/devfreq.h> > > > > > > > +#include <linux/devfreq_cooling.h> > > > > > > > #include <linux/devcoredump.h> > > > > > > > #include <linux/sched/task.h> > > > > > > > @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) > > > > > > > if (IS_ERR(gpu->devfreq.devfreq)) { > > > > > > > DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); > > > > > > > gpu->devfreq.devfreq = NULL; > > > > > > > + return; > > > > > > > } > > > > > > > devfreq_suspend_device(gpu->devfreq.devfreq); > > > > > > > + > > > > > > > + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, > > > > > > > + gpu->devfreq.devfreq); > > > > > > > + if (IS_ERR(gpu->cooling)) { > > > > > > > + 
DRM_DEV_ERROR(&gpu->pdev->dev, > > > > > > > + "Couldn't register GPU cooling device\n"); > > > > > > > + gpu->cooling = NULL; > > > > > > > + } > > > > > > > } > > > > > > > static int enable_pwrrail(struct msm_gpu *gpu) > > > > > > > @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, > > > > > > > msm_devfreq_init(gpu); > > > > > > > - > > > Will remove this unintended change. > > > > > > > gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); > > > > > > > if (gpu->aspace == NULL) > > > > > > > @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) > > > > > > > gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); > > > > > > > msm_gem_address_space_put(gpu->aspace); > > > > > > > } > > > > > > > + > > > > > > > + devfreq_cooling_unregister(gpu->cooling); > > > > > > > > > > > > Resources should be released in reverse order, otherwise the cooling device > > > > > > could use resources that have already been freed. > > > > > > Why do you think this is not the correct order? If you are thinking > > > > > about devfreq struct, it is managed device resource. > > > > > > > > I did not check specifically if changing the frequency really uses any of the > > > > resources that are released previously, In any case it's not a good idea to > > > > allow other parts of the kernel to use a half initialized/torn down device. > > > > Even if it isn't a problem today someone could change the driver to use any > > > > of these resources (or add a new one) in a frequency change, without even > > > > thinking about the cooling device, just (rightfully) asuming that things are > > > > set up and torn down in a sane order. > > > 'sane order' relative to what specifically here? Should we worry about freq > > > change at this point because we have already disabled gpu runtime pm and > > > devfreq? > > > > GPU runtime PM and the devfreq being disabled is not evident from the context > > of the function. 
You are probably right that it's not a problem in practice, > > but why give reason for doubts in the first place if this could be avoided > > by following a common practice? > > _______________________________________________ > > dri-devel mailing list > > dri-devel@lists.freedesktop.org > > https://lists.freedesktop.org/mailman/listinfo/dri-devel > > > Other option I see is to create a managed device resource (devm) version of > the devfreq_cooling_register API and use that. Is that what you are trying > to suggest? No, I was not thinking about a devm version, just manual reverse removal. Actually you can even argue that you are using the right order, I saw the ring buffer and the address space are actually initialized after msm_devfreq_init(). That strikes me as a bit odd, I guess the devfreq_suspend_device() in msm_devfreq_init() is supposed to prevent the devfreq from being active, however that is potentially racy, it could become active right after being created. I would have expected the devfreq to be created when everything else is ready, but I don't know this driver well, nor am I a devfreq expert, maybe there is a good reason for it ... In summary, the order you are using is consistent with what the driver currently does, which might not be entirely correct, but that is beyond the scope of this patch.
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 55d1648..93ffd66 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -14,6 +14,7 @@ #include <generated/utsrelease.h> #include <linux/string_helpers.h> #include <linux/devfreq.h> +#include <linux/devfreq_cooling.h> #include <linux/devcoredump.h> #include <linux/sched/task.h> @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) if (IS_ERR(gpu->devfreq.devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); gpu->devfreq.devfreq = NULL; + return; } devfreq_suspend_device(gpu->devfreq.devfreq); + + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, + gpu->devfreq.devfreq); + if (IS_ERR(gpu->cooling)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "Couldn't register GPU cooling device\n"); + gpu->cooling = NULL; + } } static int enable_pwrrail(struct msm_gpu *gpu) @@ -926,7 +936,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, msm_devfreq_init(gpu); - gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); if (gpu->aspace == NULL) @@ -1005,4 +1014,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); msm_gem_address_space_put(gpu->aspace); } + + devfreq_cooling_unregister(gpu->cooling); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 6c9e1fd..9a8f20d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -147,6 +147,8 @@ struct msm_gpu { struct msm_gpu_state *crashstate; /* True if the hardware supports expanded apriv (a650 and newer) */ bool hw_apriv; + + struct thermal_cooling_device *cooling; }; static inline struct msm_gpu *dev_to_gpu(struct device *dev)
Register GPU as a devfreq cooling device so that it can be passively cooled by the thermal framework. Signed-off-by: Akhil P Oommen <akhilpo@codeaurora.org> --- drivers/gpu/drm/msm/msm_gpu.c | 13 ++++++++++++- drivers/gpu/drm/msm/msm_gpu.h | 2 ++ 2 files changed, 14 insertions(+), 1 deletion(-)