@@ -2232,6 +2232,7 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags)
@@ -2240,14 +2241,19 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
+ int ret;
if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
- alignment = intel_surf_alignment(fb, 0);
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
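
The new phys_cursor parameter lets intel_pin_and_fence_fb_obj() pick the physical-cursor alignment itself instead of requiring the caller to have attached the phys backing beforehand; the i915_gem_object_attach_phys() call itself moves below, inside the ww transaction, so it runs under the object lock.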
@@ -2282,14 +2288,26 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
- vma = i915_gem_object_pin_to_display_plane(obj,
- alignment, view, pinctl);
- if (IS_ERR(vma))
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
goto err;
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- int ret;
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
/*
* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
@@ -2310,16 +2328,28 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
ret = i915_vma_pin_fence(vma);
if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
i915_gem_object_unpin_from_display_plane(vma);
- vma = ERR_PTR(ret);
- goto err;
+ goto err_unpin;
}
+ ret = 0;
- if (ret == 0 && vma->fence)
+ if (vma->fence)
*out_flags |= PLANE_HAS_FENCE;
}
i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
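
With this, the whole pin sequence runs as a single ww transaction: lock the object, attach the phys backing for cursors, take a page reference, pin to the display plane, and optionally install a fence. A -EDEADLK from any step unwinds through the labels, backs off, and retries with the same ww context, which is what keeps the lock ordering deadlock-free. Two details worth noting: the success path deliberately falls through into err_unpin, because the explicit pin_pages() reference only has to live for the duration of the transaction (the returned vma holds its own page reference), and the ret = 0 after the fence attempt swallows a fence failure on gen4+, where scan-out can proceed unfenced.

For reference, here is the locking skeleton in isolation, with the i915-specific work reduced to a placeholder; a minimal sketch of the pattern, not a drop-in helper:

static int ww_transaction_sketch(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int ret;

	/* intr == true: lock waits may be interrupted by signals */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret) {
		/* ... work that needs obj's dma-resv lock held ... */
	}
	if (ret == -EDEADLK) {
		/* lost the ordering race: drop all held locks, then retry */
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* unlocks anything still held */
	return ret;
}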
@@ -16626,19 +16656,11 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
- if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- const int align = intel_cursor_alignment(dev_priv);
- int err;
-
- err = i915_gem_object_attach_phys(obj, align);
- if (err)
- return err;
- }
-
- vma = intel_pin_and_fence_fb_obj(fb,
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
&plane_state->view,
intel_plane_uses_fence(plane_state),
&plane_state->flags);
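
intel_plane_pin_fb() now only computes the phys_cursor predicate; the attach happens inside the pin helper, under the object lock, which is the contract the new assert_object_held() in i915_gem_object_attach_phys() (further down) enforces.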
@@ -16734,13 +16756,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (!obj)
return 0;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
ret = intel_plane_pin_fb(new_plane_state);
-
- i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
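
The pin_pages()/unpin_pages() bracket around intel_plane_pin_fb() goes away because intel_pin_and_fence_fb_obj() now takes and releases its own page reference inside the ww transaction.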
@@ -569,7 +569,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor,
const struct i915_ggtt_view *view,
bool uses_fence,
unsigned long *out_flags);
@@ -211,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false,
&view, false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
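
fbdev never deals with a physically addressed cursor, so it passes phys_cursor = false and is otherwise unchanged.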
@@ -755,6 +755,32 @@ static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
return cmd;
}
+static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ int ret;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(new_bo, &ww);
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0,
+ NULL, PIN_MAPPABLE);
+ ret = PTR_ERR_OR_ZERO(vma);
+ }
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct drm_intel_overlay_put_image *params)
@@ -776,12 +802,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(new_bo,
- 0, NULL, PIN_MAPPABLE);
+ vma = intel_overlay_pin_fb(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
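
The overlay gets a small wrapper of its own so the backoff loop stays out of intel_overlay_do_put_image(). PTR_ERR_OR_ZERO() folds an ERR_PTR vma into ret, so a -EDEADLK coming back from i915_gem_object_pin_to_display_plane() takes the same backoff-and-retry path as a failed lock. At the call site, a pin failure is still converted into ret before jumping to out_pin_section, so the error reaches the caller instead of the stale zero left over from the earlier calls.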
@@ -313,12 +313,12 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
*/
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -326,11 +326,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
return ERR_PTR(-EINVAL);
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (ret)
- goto err;
/*
* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -345,7 +340,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
- goto err;
+ return ERR_PTR(ret);
/*
* As the user may map the buffer once pinned in the display plane
@@ -358,32 +353,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
(!view || view->type == I915_GGTT_VIEW_NORMAL))
- vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, alignment,
+ vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
flags | PIN_MAPPABLE |
PIN_NONBLOCK);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
- vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0,
+ vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
alignment, flags);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err;
- }
+ if (IS_ERR(vma))
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
i915_gem_object_flush_if_display_locked(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
-
- if (ret)
- return ERR_PTR(ret);
-
return vma;
}
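
i915_gem_object_pin_to_display_plane() becomes a leaf of the transaction: it uses the caller's ww context, never inits, backs off, or finis one itself, and returns failures directly, as ERR_PTR(ret) or as a bare ERR_PTR(-EDEADLK) that the callers' retry loops recognize. The pre-existing vma != ERR_PTR(-EDEADLK) test matters here: the opportunistic PIN_MAPPABLE attempt must not mask a ww deadlock, since the caller has to back off before any further pinning is attempted.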
@@ -489,6 +489,7 @@ int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
u32 alignment,
const struct i915_ggtt_view *view,
unsigned int flags);
@@ -219,6 +219,8 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
int err;
+ assert_object_held(obj);
+
if (align > obj->base.size)
return -EINVAL;
@@ -232,13 +234,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (err)
return err;
- err = i915_gem_object_lock_interruptible(obj, NULL);
- if (err)
- return err;
-
err = mutex_lock_interruptible(&obj->mm.lock);
if (err)
- goto err_unlock;
+ return err;
if (unlikely(!i915_gem_object_has_struct_page(obj)))
goto out;
@@ -269,8 +267,6 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
out:
mutex_unlock(&obj->mm.lock);
-err_unlock:
- i915_gem_object_unlock(obj);
return err;
}
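
i915_gem_object_attach_phys() now states its locking contract with assert_object_held() and drops the internal lock/unlock pair; every caller must already hold the object lock. Outside a ww transaction that looks like the snippet below, mirroring the selftest that follows (attach_phys_locked_sketch() is a made-up name for illustration):

static int attach_phys_locked_sketch(struct drm_i915_gem_object *obj, int align)
{
	int err;

	/* plain dma-resv lock, no ww context; satisfies assert_object_held() */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_attach_phys(obj, align);
	i915_gem_object_unlock(obj);

	return err;
}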
@@ -31,7 +31,9 @@ static int mock_phys_object(void *arg)
goto out_obj;
}
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ i915_gem_object_unlock(obj);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
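
The selftest takes the plain object lock (NULL ww context) around the call to satisfy the new assert; no backoff loop is needed, since a single lock taken without a ww acquire context cannot fail with -EDEADLK.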