@@ -2166,6 +2166,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
#define PIN_GLOBAL 0x4
#define PIN_ALIASING 0x8
#define PIN_GLOBAL_ALIASED (PIN_ALIASING | PIN_GLOBAL)
+#define PIN_BOUND 0x10
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
@@ -3279,7 +3279,13 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
goto err_unpin;
+ if (flags & PIN_BOUND) {
+ WARN_ON(!vma->node.allocated && !vma->obj->userptr.ptr);
+ goto skip_search;
+ }
+
search_free:
+ WARN_ON(vma->node.allocated);
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
@@ -3293,6 +3299,7 @@ search_free:
goto err_free_vma;
}
+skip_search:
if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
ret = -EINVAL;
@@ -3329,10 +3336,13 @@ search_free:
i915_gem_vma_bind(vma, obj->cache_level, vma_bind_flags);
i915_gem_verify_gtt(dev);
+ if (flags & PIN_BOUND)
+ vma->uptr_bind=1;
return vma;
err_remove_node:
- drm_mm_remove_node(&vma->node);
+ if ((flags & PIN_BOUND) == 0)
+ drm_mm_remove_node(&vma->node);
err_free_vma:
i915_gem_vma_destroy(vma);
vma = ERR_PTR(ret);
@@ -3875,6 +3885,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
+ if (flags & PIN_BOUND) {
+ if (WARN_ON(flags & ~PIN_BOUND))
+ return -EINVAL;
+ }
+
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -3898,7 +3913,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
- if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+ if (vma == NULL || !drm_mm_node_allocated(&vma->node) ||
+ ((flags & PIN_BOUND) && !vma->uptr_bind)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -4265,7 +4281,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma)
{
- WARN_ON(vma->node.allocated);
+ WARN_ON(vma->node.allocated && !vma->uptr);
/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
@@ -566,6 +566,9 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
+ if (vma->uptr)
+ flags |= PIN_BOUND;
+
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
return ret;
@@ -171,6 +171,10 @@ struct i915_vma {
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+ /* FIXME: temporary hack bits for pre-reserved userptr VMAs; remove once proper userptr binding support lands */
+ unsigned int uptr:1; /* Whether this VMA has been userptr'd */
+ unsigned int uptr_bind:1; /* Whether we've actually bound it */
+
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
This HACK allows users to reuse the userptr ioctl in order to pre-reserve the VMA at a specific location. The VMA follows all the same paths as other userptr objects; only the drm_mm node is actually allocated up front. Again, this patch is a big HACK, intended only to unblock some people who are currently using userptr. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 22 +++++++++++++++++++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 +++ drivers/gpu/drm/i915/i915_gem_gtt.h | 4 ++++ 4 files changed, 27 insertions(+), 3 deletions(-)