Message ID | 20220914220427.3091448-5-matthew.d.roper@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Further multi-gt handling | expand |
On 9/14/2022 3:04 PM, Matt Roper wrote: > From: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > > This, along with the changes already landed in commit 1c66a12ab431 > ("drm/i915: Handle each GT on init/release and suspend/resume") makes > engines from all GTs actually known to the driver. > > To accomplish this we need to sprinkle a lot of for_each_gt calls around > but is otherwise pretty un-eventuful. > > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > Signed-off-by: Matt Roper <matthew.d.roper@intel.com> > --- > drivers/gpu/drm/i915/i915_driver.c | 3 +- > drivers/gpu/drm/i915/i915_gem.c | 46 ++++++++++++++++++++++-------- > 2 files changed, 36 insertions(+), 13 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c > index c459eb362c47..9d1fc2477f80 100644 > --- a/drivers/gpu/drm/i915/i915_driver.c > +++ b/drivers/gpu/drm/i915/i915_driver.c > @@ -1661,7 +1661,8 @@ static int intel_runtime_suspend(struct device *kdev) > > intel_runtime_pm_enable_interrupts(dev_priv); > > - intel_gt_runtime_resume(to_gt(dev_priv)); > + for_each_gt(gt, dev_priv, i) > + intel_gt_runtime_resume(gt); > > enable_rpm_wakeref_asserts(rpm); > > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index f18cc6270b2b..0bf71082f21a 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915) > > int i915_gem_init(struct drm_i915_private *dev_priv) > { > + struct intel_gt *gt; > + unsigned int i; > int ret; > > /* We need to fallback to 4K pages if host doesn't support huge gtt. 
*/ > @@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) > */ > intel_init_clock_gating(dev_priv); > > - ret = intel_gt_init(to_gt(dev_priv)); > - if (ret) > - goto err_unlock; > + for_each_gt(gt, dev_priv, i) { > + ret = intel_gt_init(gt); > + if (ret) > + goto err_unlock; > + } > > return 0; > > @@ -1173,8 +1177,15 @@ int i915_gem_init(struct drm_i915_private *dev_priv) > err_unlock: > i915_gem_drain_workqueue(dev_priv); > > - if (ret != -EIO) > - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); > + if (ret != -EIO) { > + for_each_gt(gt, dev_priv, i) { > + intel_gt_driver_remove(gt); > + intel_gt_driver_release(gt); > + } > + > + for_each_gt(gt, dev_priv, i) > + intel_uc_cleanup_firmwares(&gt->uc); Any reason not to have the uc_cleanup in the same loop as the gt functions? Also, you're looping intel_uc_cleanup_firmwares but not intel_uc_fetch_firmwares(). Not an issue since the cleanup function will skip if the fetch was not done, but I thought it was worth mentioning. I can include the loop for the fetch as part of the support for the media GuC (which I'll send after this is merged). > + } > > if (ret == -EIO) { > /* > @@ -1182,10 +1193,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) > * as wedged. But we only want to do this when the GPU is angry, > * for all other failure, such as an allocation failure, bail. 
*/ > - if (!intel_gt_is_wedged(to_gt(dev_priv))) { > - i915_probe_error(dev_priv, > - "Failed to initialize GPU, declaring it wedged!\n"); > - intel_gt_set_wedged(to_gt(dev_priv)); > + for_each_gt(gt, dev_priv, i) { > + if (!intel_gt_is_wedged(gt)) { > + i915_probe_error(dev_priv, > + "Failed to initialize GPU, declaring it wedged!\n"); > + intel_gt_set_wedged(gt); > + } > } > > /* Minimal basic recovery for KMS */ > @@ -1213,10 +1226,14 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) > > void i915_gem_driver_remove(struct drm_i915_private *dev_priv) > { > + struct intel_gt *gt; > + unsigned int i; > + > intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); > > i915_gem_suspend_late(dev_priv); > - intel_gt_driver_remove(to_gt(dev_priv)); > + for_each_gt(gt, dev_priv, i) > + intel_gt_driver_remove(gt); > dev_priv->uabi_engines = RB_ROOT; > > /* Flush any outstanding unpin_work. */ > @@ -1227,9 +1244,14 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) > > void i915_gem_driver_release(struct drm_i915_private *dev_priv) > { > - intel_gt_driver_release(to_gt(dev_priv)); > + struct intel_gt *gt; > + unsigned int i; > + > + for_each_gt(gt, dev_priv, i) > + intel_gt_driver_release(gt); > > - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); > + for_each_gt(gt, dev_priv, i) > + intel_uc_cleanup_firmwares(&gt->uc); Same here, those can be in the same loop. Daniele > > i915_gem_drain_freed_objects(dev_priv); >
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index c459eb362c47..9d1fc2477f80 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1661,7 +1661,8 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_gt_runtime_resume(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_resume(gt); enable_rpm_wakeref_asserts(rpm); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f18cc6270b2b..0bf71082f21a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915) int i915_gem_init(struct drm_i915_private *dev_priv) { + struct intel_gt *gt; + unsigned int i; int ret; /* We need to fallback to 4K pages if host doesn't support huge gtt. */ @@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) */ intel_init_clock_gating(dev_priv); - ret = intel_gt_init(to_gt(dev_priv)); - if (ret) - goto err_unlock; + for_each_gt(gt, dev_priv, i) { + ret = intel_gt_init(gt); + if (ret) + goto err_unlock; + } return 0; @@ -1173,8 +1177,15 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_unlock: i915_gem_drain_workqueue(dev_priv); - if (ret != -EIO) - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); + if (ret != -EIO) { + for_each_gt(gt, dev_priv, i) { + intel_gt_driver_remove(gt); + intel_gt_driver_release(gt); + } + + for_each_gt(gt, dev_priv, i) + intel_uc_cleanup_firmwares(&gt->uc); + } if (ret == -EIO) { /* @@ -1182,10 +1193,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * as wedged. But we only want to do this when the GPU is angry, * for all other failure, such as an allocation failure, bail. 
*/ - if (!intel_gt_is_wedged(to_gt(dev_priv))) { - i915_probe_error(dev_priv, - "Failed to initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) { + if (!intel_gt_is_wedged(gt)) { + i915_probe_error(dev_priv, + "Failed to initialize GPU, declaring it wedged!\n"); + intel_gt_set_wedged(gt); + } } /* Minimal basic recovery for KMS */ @@ -1213,10 +1226,14 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { + struct intel_gt *gt; + unsigned int i; + intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); i915_gem_suspend_late(dev_priv); - intel_gt_driver_remove(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_remove(gt); dev_priv->uabi_engines = RB_ROOT; /* Flush any outstanding unpin_work. */ @@ -1227,9 +1244,14 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - intel_gt_driver_release(to_gt(dev_priv)); + struct intel_gt *gt; + unsigned int i; + + for_each_gt(gt, dev_priv, i) + intel_gt_driver_release(gt); - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); + for_each_gt(gt, dev_priv, i) + intel_uc_cleanup_firmwares(&gt->uc); i915_gem_drain_freed_objects(dev_priv);