diff mbox series

[v5,2/5] drm/i915/display: handle migration for dpt

Message ID 20221004131916.233474-2-matthew.auld@intel.com (mailing list archive)
State New, archived
Headers show
Series [v5,1/5] drm/i915: remove the TODO in pin_and_fence_fb_obj | expand

Commit Message

Matthew Auld Oct. 4, 2022, 1:19 p.m. UTC
On discrete platforms like DG2, it looks like the dpt path here is
missing the migrate-to-lmem step.

v2:
  - Move the vma_pin() under the for_i915_gem_ww(), otherwise the
    object can be moved after dropping the lock and then doing the pin.

Fixes: 33e7a975103c ("drm/i915/xelpd: First stab at DPT support")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Jianshui Yu <jianshui.yu@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/i915/display/intel_fb_pin.c | 51 +++++++++++++--------
 1 file changed, 33 insertions(+), 18 deletions(-)

Comments

Ville Syrjälä Oct. 4, 2022, 1:31 p.m. UTC | #1
On Tue, Oct 04, 2022 at 02:19:13PM +0100, Matthew Auld wrote:
> On platforms like DG2, it looks like the dpt path here is missing the
> migrate-to-lmem step on discrete platforms.
> 
> v2:
>   - Move the vma_pin() under the for_i915_gem_ww(), otherwise the
>     object can be moved after dropping the lock and then doing the pin.
> 
> Fixes: 33e7a975103c ("drm/i915/xelpd: First stab at DPT support")
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Jianshui Yu <jianshui.yu@intel.com>
> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Cc: Nirmoy Das <nirmoy.das@intel.com>

Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/display/intel_fb_pin.c | 51 +++++++++++++--------
>  1 file changed, 33 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
> index 0cd9e8cb078b..5031ee5695dd 100644
> --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
> +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
> @@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
>  	struct drm_device *dev = fb->dev;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
> +	struct i915_gem_ww_ctx ww;
>  	struct i915_vma *vma;
>  	u32 alignment;
>  	int ret;
>  
> +	/*
> +	 * We are not syncing against the binding (and potential migrations)
> +	 * below, so this vm must never be async.
> +	*/
> +	GEM_WARN_ON(vm->bind_async_flags);
> +
>  	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
>  		return ERR_PTR(-EINVAL);
>  
> @@ -37,29 +44,37 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
>  
>  	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
>  
> -	ret = i915_gem_object_lock_interruptible(obj, NULL);
> -	if (!ret) {
> +	for_i915_gem_ww(&ww, ret, true) {
> +		ret = i915_gem_object_lock(obj, &ww);
> +		if (ret)
> +			continue;
> +
> +		if (HAS_LMEM(dev_priv)) {
> +			ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
> +			if (ret)
> +				continue;
> +		}
> +
>  		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
> -		i915_gem_object_unlock(obj);
> -	}
> -	if (ret) {
> -		vma = ERR_PTR(ret);
> -		goto err;
> -	}
> +		if (ret)
> +			continue;
>  
> -	vma = i915_vma_instance(obj, vm, view);
> -	if (IS_ERR(vma))
> -		goto err;
> +		vma = i915_vma_instance(obj, vm, view);
> +		if (IS_ERR(vma)) {
> +			ret = PTR_ERR(vma);
> +			continue;
> +		}
>  
> -	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
> -		ret = i915_vma_unbind_unlocked(vma);
> -		if (ret) {
> -			vma = ERR_PTR(ret);
> -			goto err;
> +		if (i915_vma_misplaced(vma, 0, alignment, 0)) {
> +			ret = i915_vma_unbind(vma);
> +			if (ret)
> +				continue;
>  		}
> -	}
>  
> -	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
> +		ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
> +		if (ret)
> +			continue;
> +	}
>  	if (ret) {
>  		vma = ERR_PTR(ret);
>  		goto err;
> -- 
> 2.37.3
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 0cd9e8cb078b..5031ee5695dd 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@  intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *vma;
 	u32 alignment;
 	int ret;
 
+	/*
+	 * We are not syncing against the binding (and potential migrations)
+	 * below, so this vm must never be async.
+	*/
+	GEM_WARN_ON(vm->bind_async_flags);
+
 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
 		return ERR_PTR(-EINVAL);
 
@@ -37,29 +44,37 @@  intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 
 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (!ret) {
+	for_i915_gem_ww(&ww, ret, true) {
+		ret = i915_gem_object_lock(obj, &ww);
+		if (ret)
+			continue;
+
+		if (HAS_LMEM(dev_priv)) {
+			ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
+			if (ret)
+				continue;
+		}
+
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
-		i915_gem_object_unlock(obj);
-	}
-	if (ret) {
-		vma = ERR_PTR(ret);
-		goto err;
-	}
+		if (ret)
+			continue;
 
-	vma = i915_vma_instance(obj, vm, view);
-	if (IS_ERR(vma))
-		goto err;
+		vma = i915_vma_instance(obj, vm, view);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			continue;
+		}
 
-	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
-		ret = i915_vma_unbind_unlocked(vma);
-		if (ret) {
-			vma = ERR_PTR(ret);
-			goto err;
+		if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+			ret = i915_vma_unbind(vma);
+			if (ret)
+				continue;
 		}
-	}
 
-	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+		ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+		if (ret)
+			continue;
+	}
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err;