@@ -54,6 +54,7 @@
#define STATE_INITIALIZING 0
#define STATE_OK 1
#define STATE_ERR 2
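+/* Positive status: an existing userptr BO already covers the range. */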
+#define STATE_RES_EXISTS 3
#define MAX_CAPSET_ID 63
#define MAX_RINGS 64
@@ -114,18 +115,23 @@ struct virtio_gpu_object_vram {
};
struct virtio_gpu_object_userptr;
+struct virtio_gpu_fpriv;
struct virtio_gpu_object_userptr_ops {
int (*get_pages)(struct virtio_gpu_object_userptr *userptr);
void (*put_pages)(struct virtio_gpu_object_userptr *userptr);
void (*release)(struct virtio_gpu_object_userptr *userptr);
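+ /* Optional hooks maintaining the per-file interval tree of ranges. */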
+ int (*insert)(struct virtio_gpu_object_userptr *userptr, struct virtio_gpu_fpriv *fpriv);
+ int (*remove)(struct virtio_gpu_object_userptr *userptr, struct virtio_gpu_fpriv *fpriv);
};
struct virtio_gpu_object_userptr {
struct virtio_gpu_object base;
const struct virtio_gpu_object_userptr_ops *ops;
struct mutex lock;
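+ /* ptr: original user VA; start/last: page-aligned range bounds. */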
+ uint64_t ptr;
uint64_t start;
+ uint64_t last;
uint32_t npages;
uint32_t bo_handle;
uint32_t flags;
@@ -134,6 +140,8 @@ struct virtio_gpu_object_userptr {
struct drm_file *file;
struct page **pages;
struct sg_table *sgt;
+
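+ /* Node in the per-file userptrs_tree, spanning [start, last]. */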
+ struct interval_tree_node it_node;
};
#define to_virtio_gpu_shmem(virtio_gpu_object) \
@@ -307,6 +315,8 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
char debug_name[DEBUG_NAME_MAX_LEN];
bool explicit_debug_name;
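+ /* Userptr BOs indexed by VA range; guarded by userptrs_tree_lock. */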
+ struct rb_root_cached userptrs_tree;
+ struct mutex userptrs_tree_lock;
};
/* virtgpu_ioctl.c */
@@ -520,6 +530,10 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
struct drm_file *file,
struct virtio_gpu_object_params *params,
- struct virtio_gpu_object **bo_ptr);
+ struct virtio_gpu_object **bo_ptr,
+ struct drm_virtgpu_resource_create_blob *rc_blob);
bool virtio_gpu_is_userptr(struct virtio_gpu_object *bo);
+void virtio_gpu_userptr_interval_tree_init(struct virtio_gpu_fpriv *vfpriv);
+void virtio_gpu_userptr_set_handle(struct virtio_gpu_object *qobj,
+ uint32_t handle);
#endif
@@ -534,8 +534,11 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
if (guest_blob && !params.userptr)
ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
- else if (guest_blob && params.userptr)
- ret = virtio_gpu_userptr_create(vgdev, file, &params, &bo);
+ else if (guest_blob && params.userptr) {
+ ret = virtio_gpu_userptr_create(vgdev, file, &params, &bo, rc_blob);
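+ /* A positive STATE_RES_EXISTS means rc_blob was already
+ * filled from an existing BO covering this range. */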
+ if (ret > 0)
+ return ret;
+ }
else if (!guest_blob && host3d_blob)
ret = virtio_gpu_vram_create(vgdev, &params, &bo);
else
@@ -567,6 +570,9 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
rc_blob->res_handle = bo->hw_res_handle;
rc_blob->bo_handle = handle;
+ if (guest_blob && params.userptr)
+ virtio_gpu_userptr_set_handle(bo, handle);
+
/*
* The handle owns the reference now. But we must drop our
* remaining reference *after* we no longer need to dereference
@@ -691,6 +697,9 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
}
}
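+ /* Userptr range tracking is only used by HSAKMT-capset contexts. */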
+ if (vfpriv->context_init & VIRTIO_GPU_CAPSET_HSAKMT)
+ virtio_gpu_userptr_interval_tree_init(vfpriv);
+
virtio_gpu_create_context_locked(vgdev, vfpriv);
virtio_gpu_notify(vgdev);
@@ -10,6 +10,92 @@
static struct sg_table *
virtio_gpu_userptr_get_sg_table(struct drm_gem_object *obj);
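+/*
+ * Tree maintenance is routed through the ops table, presumably so
+ * userptr variants that need no range tracking can leave the hooks
+ * unset (the wrappers then return -EINVAL).
+ */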
+static int virtio_gpu_userptr_insert(struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ if (!userptr->ops->insert)
+ return -EINVAL;
+
+ return userptr->ops->insert(userptr, vfpriv);
+}
+
+static int virtio_gpu_userptr_remove(struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ if (!userptr->ops->remove)
+ return -EINVAL;
+
+ return userptr->ops->remove(userptr, vfpriv);
+}
+
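+/* Byte offset of the page containing addr from the start of the BO. */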
+static uint64_t virtio_gpu_userptr_get_offset(struct virtio_gpu_object *qobj,
+ uint64_t addr)
+{
+ struct virtio_gpu_object_userptr *userptr = to_virtio_gpu_userptr(qobj);
+
+ return PAGE_ALIGN_DOWN(addr) - PAGE_ALIGN_DOWN(userptr->ptr);
+}
+
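+/*
+ * Find a userptr BO of this file that fully covers [start, last];
+ * partially overlapping BOs are skipped, so such ranges fall through
+ * to the creation of a fresh BO.
+ */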
+static struct virtio_gpu_object_userptr *
+virtio_gpu_userptr_from_addr_range(struct virtio_gpu_fpriv *vfpriv,
+ uint64_t start, uint64_t last)
+{
+ struct interval_tree_node *node;
+ struct virtio_gpu_object_userptr *userptr;
+
+ node = interval_tree_iter_first(&vfpriv->userptrs_tree, start, last);
+ while (node) {
+ userptr = container_of(node, struct virtio_gpu_object_userptr,
+ it_node);
+
+ /* Only a BO that fully covers the range can be reused. */
+ if (start >= userptr->start && last <= userptr->last)
+ return userptr;
+
+ node = interval_tree_iter_next(node, start, last);
+ }
+
+ return NULL;
+}
+
+static int virtio_gpu_userptr_insert_interval_tree(
+ struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ /* Reject a userptr whose range was never initialized. */
+ if (userptr->start == 0 || userptr->last == 0)
+ return -EINVAL;
+
+ userptr->it_node.start = userptr->start;
+ userptr->it_node.last = userptr->last;
+ interval_tree_insert(&userptr->it_node, &vfpriv->userptrs_tree);
+ return 0;
+}
+
+static int virtio_gpu_userptr_remove_interval_tree(
+ struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ /* A zeroed it_node was never inserted into the tree. */
+ if (userptr->it_node.start == 0 || userptr->it_node.last == 0)
+ return -EINVAL;
+
+ interval_tree_remove(&userptr->it_node, &vfpriv->userptrs_tree);
+ return 0;
+}
+
+void virtio_gpu_userptr_set_handle(struct virtio_gpu_object *qobj,
+ uint32_t handle)
+{
+ struct virtio_gpu_object_userptr *userptr = to_virtio_gpu_userptr(qobj);
+
+ userptr->bo_handle = handle;
+}
+
static void virtio_gpu_userptr_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -27,6 +113,11 @@ static void virtio_gpu_userptr_free(struct drm_gem_object *obj)
static void virtio_gpu_userptr_object_close(struct drm_gem_object *obj,
struct drm_file *file)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_object_userptr *userptr = to_virtio_gpu_userptr(bo);
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+
+ /* Serialize against concurrent lookups and inserts on this file. */
+ mutex_lock(&vfpriv->userptrs_tree_lock);
+ virtio_gpu_userptr_remove(userptr, vfpriv);
+ mutex_unlock(&vfpriv->userptrs_tree_lock);
+
virtio_gpu_gem_object_close(obj, file);
}
@@ -63,9 +154,9 @@ virtio_gpu_userptr_get_pages(struct virtio_gpu_object_userptr *userptr)
do {
num_pages = userptr->npages - pinned;
- ret = pin_user_pages_fast(userptr->start + pinned * PAGE_SIZE,
- num_pages, flag,
- userptr->pages + pinned);
+ ret = pin_user_pages_fast(
+ PAGE_ALIGN_DOWN(userptr->start) + pinned * PAGE_SIZE,
+ num_pages, flag, userptr->pages + pinned);
if (ret < 0) {
if (pinned)
@@ -127,6 +218,12 @@ virtio_gpu_userptr_get_sg_table(struct drm_gem_object *obj)
return userptr->sgt;
}
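+/* Per-file userptr tracking; called from context init for HSAKMT contexts. */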
+void virtio_gpu_userptr_interval_tree_init(struct virtio_gpu_fpriv *vfpriv)
+{
+ vfpriv->userptrs_tree = RB_ROOT_CACHED;
+ mutex_init(&vfpriv->userptrs_tree_lock);
+}
+
static int
virtio_gpu_userptr_init(struct drm_device *dev, struct drm_file *file,
struct virtio_gpu_object_userptr *userptr,
@@ -144,6 +241,8 @@ virtio_gpu_userptr_init(struct drm_device *dev, struct drm_file *file,
aligned_size = roundup(page_offset + params->size, PAGE_SIZE);
userptr->start = aligned_addr;
+ userptr->last = aligned_addr + aligned_size - 1UL;
+ userptr->ptr = params->userptr;
userptr->npages = aligned_size >> PAGE_SHIFT;
userptr->flags = params->blob_flags;
@@ -167,13 +266,17 @@ static const struct virtio_gpu_object_userptr_ops virtio_gpu_userptr_ops = {
.get_pages = virtio_gpu_userptr_get_pages,
.put_pages = virtio_gpu_userptr_put_pages,
.release = virtio_gpu_userptr_release,
+ .insert = virtio_gpu_userptr_insert_interval_tree,
+ .remove = virtio_gpu_userptr_remove_interval_tree,
};
int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
struct drm_file *file,
struct virtio_gpu_object_params *params,
- struct virtio_gpu_object **bo_ptr)
+ struct virtio_gpu_object **bo_ptr,
+ struct drm_virtgpu_resource_create_blob *rc_blob)
{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_object_userptr *userptr;
int ret, si;
struct sg_table *sgt;
@@ -187,6 +290,20 @@ int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
params->size))
return -EFAULT;
+ mutex_lock(&vfpriv->userptrs_tree_lock);
+
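+ /*
+ * Reuse an existing BO that already covers this range rather than
+ * pinning the same pages twice: hand back its handles and the
+ * offset of the requested address within it.
+ */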
+ userptr = virtio_gpu_userptr_from_addr_range(
+ vfpriv, params->userptr, params->userptr + params->size - 1UL);
+ if (userptr) {
+ *bo_ptr = &userptr->base;
+ rc_blob->res_handle = userptr->base.hw_res_handle;
+ rc_blob->bo_handle = userptr->bo_handle;
+ rc_blob->offset = virtio_gpu_userptr_get_offset(
+ &userptr->base, rc_blob->userptr);
+ mutex_unlock(&vfpriv->userptrs_tree_lock);
+ return STATE_RES_EXISTS;
+ }
+
userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
- if (!userptr)
- return -ENOMEM;
+ if (!userptr) {
+ ret = -ENOMEM;
+ goto failed_free;
+ }
@@ -218,6 +335,9 @@ int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
(ents)[si].padding = 0;
}
+ ret = virtio_gpu_userptr_insert(userptr, vfpriv);
+ if (ret)
+ goto failed_free;
+
+ mutex_unlock(&vfpriv->userptrs_tree_lock);
+
virtio_gpu_cmd_resource_create_blob(vgdev, &userptr->base, params, ents,
sgt->nents);
@@ -225,6 +345,7 @@ int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
return 0;
failed_free:
+ mutex_unlock(&vfpriv->userptrs_tree_lock);
kfree(userptr);
return ret;
}
@@ -196,6 +196,7 @@ struct drm_virtgpu_resource_create_blob {
__u64 cmd;
__u64 blob_id;
__u64 userptr;
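+ /* out: offset into an existing BO when the range was already mapped */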
+ __u64 offset;
};
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001