@@ -4,6 +4,7 @@
*/
#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
@@ -116,6 +117,7 @@ static void dpt_cleanup(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ i915_gem_object_put(vm->scratch[0]);
i915_gem_object_put(dpt->obj);
}
@@ -230,17 +232,40 @@ void intel_dpt_suspend(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
+static int scratch_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
+{
+ obj->mm.pages = ZERO_SIZE_PTR;
+ return 0;
+}
+
+static void scratch_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+}
+
+static const struct drm_i915_gem_object_ops scratch_dummy_obj_ops = {
+ .name = "scratch_dummy_obj",
+ .get_pages = scratch_dummy_obj_get_pages,
+ .put_pages = scratch_dummy_obj_put_pages,
+};
+
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
+ struct drm_i915_gem_object *dpt_obj, *scratch;
struct i915_address_space *vm;
struct i915_dpt *dpt;
size_t size;
int ret;
+ scratch = __i915_gem_object_create_internal(i915,
+ &scratch_dummy_obj_ops,
+ SZ_4K);
+ if (IS_ERR(scratch))
+ return ERR_CAST(scratch);
+
if (intel_fb_needs_pot_stride_remap(fb))
size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
else
@@ -252,23 +277,23 @@ intel_dpt_create(struct intel_framebuffer *fb)
dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
else
dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
+ if (IS_ERR(dpt_obj)) {
+ ret = PTR_ERR(dpt_obj);
+ goto err_put_scratch;
+ }
ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
if (!ret) {
ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
i915_gem_object_unlock(dpt_obj);
}
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err_put_dpt;
dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_put_dpt;
}
vm = &dpt->vm;
@@ -281,6 +306,10 @@ intel_dpt_create(struct intel_framebuffer *fb)
i915_address_space_init(vm, VM_CLASS_DPT);
+ scratch->base.resv = i915_vm_resv_get(vm);
+ scratch->shares_resv_from = vm;
+ vm->scratch[0] = scratch;
+
vm->insert_page = dpt_insert_page;
vm->clear_range = dpt_clear_range;
vm->insert_entries = dpt_insert_entries;
@@ -294,6 +323,12 @@ intel_dpt_create(struct intel_framebuffer *fb)
dpt->obj = dpt_obj;
return &dpt->vm;
+
+err_put_dpt:
+ i915_gem_object_put(dpt_obj);
+err_put_scratch:
+ i915_gem_object_put(scratch);
+ return ERR_PTR(ret);
}
void intel_dpt_destroy(struct i915_address_space *vm)
We currently blow up in i915_vm_lock_objects when binding the dpt, due to what looks like a NULL scratch[0]. Likely the moving fence has not been unset yet (even though it should have signalled), due to some previous move. For now let's just create something which more closely resembles a proper vm. Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com> --- drivers/gpu/drm/i915/display/intel_dpt.c | 53 ++++++++++++++++++++---- 1 file changed, 44 insertions(+), 9 deletions(-)