@@ -83,6 +83,7 @@
#include "i915_file_private.h"
#include "i915_gem_context.h"
+#include "i915_gem_internal.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -11,6 +11,7 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_gem_context.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -251,6 +252,7 @@ struct create_ext {
unsigned int n_placements;
unsigned int placement_mask;
unsigned long flags;
+ u32 vm_id;
};
static void repr_placements(char *buf, size_t size,
@@ -400,9 +402,32 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
return 0;
}
+static int ext_set_vm_private(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct drm_i915_gem_create_ext_vm_private ext;
+ struct create_ext *ext_data = data;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ /* Reserved fields must be 0 */
+ if (ext.rsvd)
+ return -EINVAL;
+
+ /* vm_id 0 is reserved */
+ if (!ext.vm_id)
+ return -ENOENT;
+
+ ext_data->vm_id = ext.vm_id;
+
+ return 0;
+}
+
static const i915_user_extension_fn create_extensions[] = {
[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
+ [I915_GEM_CREATE_EXT_VM_PRIVATE] = ext_set_vm_private,
};
/**
@@ -418,6 +443,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create_ext *args = data;
struct create_ext ext_data = { .i915 = i915 };
+ struct i915_address_space *vm = NULL;
struct drm_i915_gem_object *obj;
int ret;
@@ -431,6 +457,17 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
+ if (ext_data.vm_id) {
+ vm = i915_gem_vm_lookup(file->driver_priv, ext_data.vm_id);
+ if (unlikely(!vm))
+ return -ENOENT;
+
+ if (!i915_gem_vm_is_vm_bind_mode(vm)) {
+ ret = -EINVAL;
+ goto vm_put;
+ }
+ }
+
if (!ext_data.n_placements) {
ext_data.placements[0] =
intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
@@ -457,8 +494,21 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
ext_data.placements,
ext_data.n_placements,
ext_data.flags);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto vm_put;
+ }
+
+ if (vm) {
+ obj->base.resv = vm->root_obj->base.resv;
+ obj->priv_root = i915_gem_object_get(vm->root_obj);
+ i915_vm_put(vm);
+ }
return i915_gem_publish(obj, file, &args->size, &args->handle);
+vm_put:
+ if (vm)
+ i915_vm_put(vm);
+
+ return ret;
}
@@ -221,6 +221,12 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ if (obj->priv_root) {
+ drm_dbg(obj->base.dev,
+ "Exporting VM private objects is not allowed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
@@ -868,6 +868,12 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 	if (unlikely(!obj))
 		return ERR_PTR(-ENOENT);
 
+	/* VM private objects are not supported here; drop the lookup reference */
+	if (obj->priv_root) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(-EINVAL);
+	}
+
 	/*
 	 * If the user has opted-in for protected-object tracking, make
 	 * sure the object encryption can be used.
@@ -111,6 +111,9 @@ void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
mutex_destroy(&obj->mm.get_page.lock);
mutex_destroy(&obj->mm.get_dma_page.lock);
dma_resv_fini(&obj->base._resv);
+
+ if (obj->priv_root)
+ i915_gem_object_put(obj->priv_root);
}
/**
@@ -242,6 +242,12 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
+ /**
+ * @priv_root: pointer to vm->root_obj if object is private,
+ * NULL otherwise.
+ */
+ struct drm_i915_gem_object *priv_root;
+
struct {
/**
* @vma.lock: protect the list/tree of vmas
@@ -87,6 +87,7 @@ static void i915_gem_vm_bind_remove(struct i915_vma *vma, bool release_obj)
lockdep_assert_held(&vma->vm->vm_bind_lock);
list_del_init(&vma->vm_bind_link);
+ list_del_init(&vma->non_priv_vm_bind_link);
i915_vm_bind_it_remove(vma, &vma->vm->va);
/* Release object */
@@ -216,6 +217,11 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
if (ret)
goto put_obj;
+ if (obj->priv_root && obj->priv_root != vm->root_obj) {
+ ret = -EINVAL;
+ goto put_obj;
+ }
+
ret = mutex_lock_interruptible(&vm->vm_bind_lock);
if (ret)
goto put_obj;
@@ -245,6 +251,9 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
list_add_tail(&vma->vm_bind_link, &vm->vm_bound_list);
i915_vm_bind_it_insert(vma, &vm->va);
+ if (!obj->priv_root)
+ list_add_tail(&vma->non_priv_vm_bind_link,
+ &vm->non_priv_vm_bind_list);
/* Hold object reference until vm_unbind */
i915_gem_object_get(vma->obj);
@@ -290,6 +290,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->vm_bind_list);
INIT_LIST_HEAD(&vm->vm_bound_list);
mutex_init(&vm->vm_bind_lock);
+ INIT_LIST_HEAD(&vm->non_priv_vm_bind_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
@@ -268,6 +268,10 @@ struct i915_address_space {
struct list_head vm_bound_list;
/** @va: tree of persistent vmas */
struct rb_root_cached va;
+ /** @non_priv_vm_bind_list: list of non-private object mappings */
+ struct list_head non_priv_vm_bind_list;
+ /** @root_obj: root object for dma-resv sharing by private objects */
+ struct drm_i915_gem_object *root_obj;
/* Global GTT */
bool is_ggtt:1;
@@ -242,6 +242,7 @@ vma_create(struct drm_i915_gem_object *obj,
mutex_unlock(&vm->mutex);
INIT_LIST_HEAD(&vma->vm_bind_link);
+ INIT_LIST_HEAD(&vma->non_priv_vm_bind_link);
return vma;
err_unlock:
@@ -298,6 +298,8 @@ struct i915_vma {
/** @vm_bind_link: node for the vm_bind related lists of vm */
struct list_head vm_bind_link;
+ /** @non_priv_vm_bind_link: Link in non-private persistent VMA list */
+ struct list_head non_priv_vm_bind_link;
/** Interval tree structures for persistent vma */
@@ -3611,9 +3611,13 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
* struct drm_i915_gem_create_ext_protected_content.
+ *
+ * For I915_GEM_CREATE_EXT_VM_PRIVATE usage see
+ * struct drm_i915_gem_create_ext_vm_private.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+#define I915_GEM_CREATE_EXT_VM_PRIVATE 2
__u64 extensions;
};
@@ -3731,6 +3735,35 @@ struct drm_i915_gem_create_ext_protected_content {
/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+/**
+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
+ * private to the specified VM.
+ *
+ * See struct drm_i915_gem_create_ext.
+ *
+ * By default, BOs can be mapped on multiple VMs and can also be dma-buf
+ * exported. Hence these BOs are referred to as Shared BOs.
+ * During each execbuf3 submission, the request fence must be added to the
+ * dma-resv fence list of all shared BOs mapped on the VM.
+ *
+ * Unlike Shared BOs, these VM private BOs can only be mapped on the VM they
+ * are private to and cannot be dma-buf exported. All private BOs of a VM
+ * share a single dma-resv object, so each execbuf3 submission needs to
+ * update only one dma-resv fence list. Thus, the fast path (where all
+ * required mappings are already bound) has O(1) submission latency w.r.t.
+ * the number of VM private BOs.
+ */
+struct drm_i915_gem_create_ext_vm_private {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+	/** @vm_id: ID of the VM to which the object is private */
+ __u32 vm_id;
+
+ /** @rsvd: Reserved, MBZ */
+ __u32 rsvd;
+};
+
/**
* struct drm_i915_gem_vm_bind - VA to object mapping to bind.
*