@@ -265,10 +265,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = vma->node.size;
}

- vaddr = i915_vma_pin_iomap(vma);
+ vaddr = i915_vma_pin_iomap_unlocked(vma);
if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory\n");
+ if (vaddr != ERR_PTR(-EINTR) && vaddr != ERR_PTR(-ERESTARTSYS))
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory\n");
ret = PTR_ERR(vaddr);
goto out_unpin;
}
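This first conversion, and the overlay and selftest conversions below, follow the same pattern: i915_vma_pin_iomap_unlocked() takes the object's ww lock itself, and since both the lock acquisition and the new moving-fence wait are interruptible, the call can now fail with -EINTR or -ERESTARTSYS when a signal arrives. Those are expected restart conditions rather than real failures, hence the error message is suppressed for them. A hypothetical helper (err_is_signal() is a sketch, not part of the patch) makes the intent explicit:

static bool err_is_signal(long err)
{
        /* A signal interrupted the wait; callers retry rather than warn. */
        return err == -EINTR || err == -ERESTARTSYS;
}

With such a helper the condition above would read if (!err_is_signal(PTR_ERR(vaddr))) drm_err(...).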
@@ -1357,7 +1357,7 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
else
overlay->flip_addr = i915_ggtt_offset(vma);
- overlay->regs = i915_vma_pin_iomap(vma);
+ overlay->regs = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);

if (IS_ERR(overlay->regs)) {
@@ -418,6 +418,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}

if (!ptr) {
+ err = i915_gem_object_wait_moving_fence(obj, true);
+ if (err) {
+ ptr = ERR_PTR(err);
+ goto err_unpin;
+ }
+
if (GEM_WARN_ON(type == I915_MAP_WC &&
!static_cpu_has(X86_FEATURE_PAT)))
ptr = ERR_PTR(-ENODEV);
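The pin-map path may now be reached while the object's backing store is still being migrated by the blitter, so it first waits for the object's moving fence before handing out a CPU mapping. Conceptually the added call boils down to an interruptible wait on a single dma-fence; a minimal sketch with the core API (wait_for_move() is illustrative; the real i915_gem_object_wait_moving_fence() presumably layers object-lock assertions on top):

#include <linux/dma-fence.h>

/* Sketch: block (optionally interruptibly) until a move completes. */
static int wait_for_move(struct dma_fence *moving, bool intr)
{
        if (!moving)
                return 0;

        /* dma_fence_wait() returns 0 on success or -ERESTARTSYS. */
        return dma_fence_wait(moving, intr);
}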
@@ -101,7 +101,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
intel_gt_pm_get(vma->vm->gt);
- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);
@@ -134,7 +134,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
intel_gt_pm_get(vma->vm->gt);
- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);
@@ -125,11 +125,12 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
n = page - view.partial.offset;
GEM_BUG_ON(n >= view.partial.size);

- io = i915_vma_pin_iomap(vma);
+ io = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(io)) {
- pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
- page, (int)PTR_ERR(io));
+ if (io != ERR_PTR(-EINTR) && io != ERR_PTR(-ERESTARTSYS))
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
err = PTR_ERR(io);
goto out;
}
@@ -219,11 +220,12 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
n = page - view.partial.offset;
GEM_BUG_ON(n >= view.partial.size);

- io = i915_vma_pin_iomap(vma);
+ io = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(io)) {
- pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
- page, (int)PTR_ERR(io));
+ if (io != ERR_PTR(-EINTR) && io != ERR_PTR(-ERESTARTSYS))
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
return PTR_ERR(io);
}
@@ -773,7 +775,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
return PTR_ERR(vma);

intel_gt_pm_get(vma->vm->gt);
- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);
@@ -799,7 +801,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
return PTR_ERR(vma);

intel_gt_pm_get(vma->vm->gt);
- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);
@@ -431,6 +431,13 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = i915_gem_object_get(vma->obj);
}
} else {
+ if (vma->obj) {
+ int ret;
+
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret)
+ return ret;
+ }
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
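i915_vma_bind() now treats a pending move according to how the bind itself runs: the asynchronous branch queues the bind as fence work (which i915_vma_pin_ww() below chains behind the moving fence), while the immediate branch has nothing to queue on and must block before writing PTEs. The same distinction, expressed with the generic dma-fence API (example_cb() and order_after_fence() are illustrative, not from the patch):

#include <linux/dma-fence.h>

static void example_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        /* Runs once @fence signals; the deferred bind would start here. */
}

static int order_after_fence(struct dma_fence *fence, struct dma_fence_cb *cb,
                             bool can_block)
{
        if (can_block)
                return dma_fence_wait(fence, true); /* 0 or -ERESTARTSYS */

        /* -ENOENT only means @fence had already signalled. */
        return dma_fence_add_callback(fence, cb, example_cb);
}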
@@ -455,6 +462,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
+ err = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (err)
+ goto err;
+
/*
* TODO: consider just using i915_gem_object_pin_map() for lmem
* instead, which already supports mapping non-contiguous chunks
@@ -496,6 +507,25 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
return IO_ERR_PTR(err);
}

+void __iomem *i915_vma_pin_iomap_unlocked(struct i915_vma *vma)
+{
+ struct i915_gem_ww_ctx ww;
+ void __iomem *map;
+ int err;
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err)
+ continue;
+
+ map = i915_vma_pin_iomap(vma);
+ }
+ if (err)
+ map = IO_ERR_PTR(err);
+
+ return map;
+}
+
void i915_vma_flush_writes(struct i915_vma *vma)
{
if (i915_vma_unset_ggtt_write(vma))
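The new wrapper runs i915_vma_pin_iomap() inside a for_i915_gem_ww() loop: the body is retried whenever the interruptible lock acquisition backs off with -EDEADLK, and any remaining lock error is folded into the IO_ERR_PTR() return. A minimal usage sketch for a caller that holds no object lock, assuming a GGTT-bound, pinned vma (touch_first_dword() is illustrative only):

static int touch_first_dword(struct i915_vma *vma)
{
        void __iomem *map;

        map = i915_vma_pin_iomap_unlocked(vma);
        if (IS_ERR(map))
                return PTR_ERR(map); /* may be -EINTR or -ERESTARTSYS */

        writel(0xc0ffee, map); /* write through the GGTT aperture */
        i915_vma_unpin_iomap(vma);

        return 0;
}

As in the callers converted above, the mapping remains valid until i915_vma_unpin_iomap() releases it.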
@@ -870,6 +900,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ struct dma_fence *moving = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -895,7 +926,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

- if (flags & vma->vm->bind_async_flags) {
+ moving = i915_gem_object_get_moving_fence(vma->obj);
+ if (flags & vma->vm->bind_async_flags || moving) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -909,6 +941,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
work->vm = i915_vm_get(vma->vm);

+ dma_fence_work_chain(&work->base, moving);
+
/* Allocate enough page directories to cover the used PTEs */
if (vma->vm->allocate_va_range) {
err = i915_vm_alloc_pt_stash(vma->vm,
@@ -1013,7 +1047,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ if (moving)
+ dma_fence_put(moving);
vma_put_pages(vma);
+
return err;
}
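Two details in this hunk deserve a note: the moving fence is chained into the bind work before the work is committed, so the actual PTE write cannot run ahead of the migration, and the reference returned by i915_gem_object_get_moving_fence() is dropped exactly once at the common exit, which the success path falls through as well. Since dma_fence_put() already ignores NULL, the guard is belt and braces; the cleanup could equally be:

        /* dma_fence_put() ignores NULL, so the guard is optional: */
        dma_fence_put(moving);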
@@ -326,6 +326,9 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node,
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+void __iomem *i915_vma_pin_iomap_unlocked(struct i915_vma *vma);
+
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

/**
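The new declaration lands right under the documented i915_vma_pin_iomap() but without kerneldoc of its own. A comment along these lines would match the file's conventions (suggested wording only, not part of the patch):

/**
 * i915_vma_pin_iomap_unlocked - pin and iomap, taking the object lock
 * @vma: VMA to access
 *
 * Behaves like i915_vma_pin_iomap(), but acquires the object's ww lock
 * internally (interruptibly, retrying on -EDEADLK) and waits for any
 * pending moving fence before mapping.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */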
@@ -1005,7 +1005,7 @@ static int igt_vma_remapped_gtt(void *arg)
GEM_BUG_ON(vma->ggtt_view.type != *t);

- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);
@@ -1036,7 +1036,7 @@ static int igt_vma_remapped_gtt(void *arg)
GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);

- map = i915_vma_pin_iomap(vma);
+ map = i915_vma_pin_iomap_unlocked(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
err = PTR_ERR(map);