@@ -225,7 +225,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+ filp->private_data);
}
static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -150,7 +150,8 @@ static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct ast_bo *astbo = ast_bo(bo);
- return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&astbo->gem.vma_node,
+ filp->private_data);
}
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -128,7 +128,8 @@ static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
{
struct bochs_bo *bochsbo = bochs_bo(bo);
- return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
+ filp->private_data);
}
static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -150,7 +150,8 @@ static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *fi
{
struct cirrus_bo *cirrusbo = cirrus_bo(bo);
- return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
+ filp->private_data);
}
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -257,7 +257,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
- drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -372,7 +372,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
handle = ret;
- ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv);
if (ret)
goto err_remove;
@@ -386,7 +386,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
return 0;
err_revoke:
- drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, handle);
@@ -991,7 +991,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
if (!obj)
return -EINVAL;
- if (!drm_vma_node_is_allowed(node, filp)) {
+ if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_unreference_unlocked(obj);
return -EACCES;
}
@@ -25,7 +25,6 @@
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
-#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
@@ -277,9 +276,9 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
/**
* drm_vma_node_allow - Add open-file to list of allowed users
* @node: Node to modify
- * @filp: Open file to add
+ * @tag: Tag of file to add
*
- * Add @filp to the list of allowed open-files for this node. If @filp is
+ * Add @tag to the list of allowed open-files for this node. If @tag is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -294,7 +293,7 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
@@ -315,10 +314,10 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
parent = *iter;
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp) {
+ if (tag == entry->vm_tag) {
entry->vm_count++;
goto unlock;
- } else if (filp > entry->vm_filp) {
+ } else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
} else {
iter = &(*iter)->rb_left;
@@ -330,7 +329,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
goto unlock;
}
- new->vm_filp = filp;
+ new->vm_tag = tag;
new->vm_count = 1;
rb_link_node(&new->vm_rb, parent, iter);
rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -346,17 +345,18 @@ EXPORT_SYMBOL(drm_vma_node_allow);
/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
- * @filp: Open file to remove
+ * @tag: Tag of file to remove
*
- * Decrement the ref-count of @filp in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @filp from the list. You must call
- * this once for every drm_vma_node_allow() on @filp.
+ * Decrement the ref-count of @tag in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @tag from the list. You must call
+ * this once for every drm_vma_node_allow() on @tag.
*
* This is locked against concurrent access internally.
*
- * If @filp is not on the list, nothing is done.
+ * If @tag is not on the list, nothing is done.
*/
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
@@ -366,13 +366,13 @@ void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp) {
+ if (tag == entry->vm_tag) {
if (!--entry->vm_count) {
rb_erase(&entry->vm_rb, &node->vm_files);
kfree(entry);
}
break;
- } else if (filp > entry->vm_filp) {
+ } else if (tag > entry->vm_tag) {
iter = iter->rb_right;
} else {
iter = iter->rb_left;
@@ -386,9 +386,9 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
/**
* drm_vma_node_is_allowed - Check whether an open-file is granted access
* @node: Node to check
- * @filp: Open-file to check for
+ * @tag: Tag of file to check
*
- * Search the list in @node whether @filp is currently on the list of allowed
+ * Search the list in @node whether @tag is currently on the list of allowed
* open-files (see drm_vma_node_allow()).
*
* This is locked against concurrent access internally.
@@ -397,7 +397,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
- * true iff @filp is on the list
+ * true iff @tag is on the list
*/
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
- struct file *filp)
+ struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
@@ -407,9 +407,9 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp)
+ if (tag == entry->vm_tag)
break;
- else if (filp > entry->vm_filp)
+ else if (tag > entry->vm_tag)
iter = iter->rb_right;
else
iter = iter->rb_left;
@@ -150,7 +150,8 @@ static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *f
{
struct mgag200_bo *mgabo = mgag200_bo(bo);
- return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&mgabo->gem.vma_node,
+ filp->private_data);
}
static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -1315,7 +1315,8 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+ filp->private_data);
}
static int
@@ -210,7 +210,8 @@ static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct qxl_bo *qbo = to_qxl_bo(bo);
- return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+ filp->private_data);
}
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -237,7 +237,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (radeon_ttm_tt_has_userptr(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+ filp->private_data);
}
static void radeon_move_null(struct ttm_buffer_object *bo,
@@ -24,16 +24,17 @@
*/
#include <drm/drm_mm.h>
-#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+struct drm_file;
+
struct drm_vma_offset_file {
struct rb_node vm_rb;
- struct file *vm_filp;
+ struct drm_file *vm_tag;
unsigned long vm_count;
};
@@ -62,10 +63,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node);
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
- struct file *filp);
+ struct drm_file *tag);
/**
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -216,9 +218,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
/**
* drm_vma_node_verify_access() - Access verification helper for TTM
* @node: Offset node
- * @filp: Open-file
+ * @tag: Tag of file to check
*
- * This checks whether @filp is granted access to @node. It is the same as
+ * This checks whether @tag is granted access to @node. It is the same as
* drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
* verify_access() callbacks.
*
@@ -226,9 +228,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
* 0 if access is granted, -EACCES otherwise.
*/
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
- struct file *filp)
+ struct drm_file *tag)
{
- return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+ return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}
#endif /* __DRM_VMA_MANAGER_H__ */
Rather than using "struct file*", use "struct drm_file*" as the VM tag for
BOs. This will pave the way for a "struct drm_file*" without any
"struct file*" back-pointer.

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  3 ++-
 drivers/gpu/drm/ast/ast_ttm.c           |  3 ++-
 drivers/gpu/drm/bochs/bochs_mm.c        |  3 ++-
 drivers/gpu/drm/cirrus/cirrus_ttm.c     |  3 ++-
 drivers/gpu/drm/drm_gem.c               |  8 +++----
 drivers/gpu/drm/drm_vma_manager.c       | 40 ++++++++++++++++-----------------
 drivers/gpu/drm/mgag200/mgag200_ttm.c   |  3 ++-
 drivers/gpu/drm/nouveau/nouveau_bo.c    |  3 ++-
 drivers/gpu/drm/qxl/qxl_ttm.c           |  3 ++-
 drivers/gpu/drm/radeon/radeon_ttm.c     |  3 ++-
 include/drm/drm_vma_manager.h           | 20 +++++++++--------
 11 files changed, 51 insertions(+), 41 deletions(-)
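
For reference, a minimal sketch of what a TTM verify_access callback looks
like after this change. The "foo" driver and its foo_bo type are hypothetical
stand-ins for the amdgpu/ast/bochs/cirrus/etc. callbacks patched above; the
pattern is the same in each of them:

	/*
	 * Illustrative sketch only -- foo_bo/foo_bo() stand in for the real
	 * driver types touched by this patch (amdgpu_bo, ast_bo, ...).
	 */
	static int foo_bo_verify_access(struct ttm_buffer_object *bo,
					struct file *filp)
	{
		struct foo_bo *foobo = foo_bo(bo);

		/*
		 * DRM stores the per-open "struct drm_file" in
		 * filp->private_data, and that drm_file is now the tag the
		 * vma-manager keys its allow-list on.
		 */
		return drm_vma_node_verify_access(&foobo->gem.vma_node,
						  filp->private_data);
	}

The GEM core mirrors this pairing: drm_gem_handle_create_tail() now calls
drm_vma_node_allow(&obj->vma_node, file_priv) and
drm_gem_object_release_handle() undoes it with
drm_vma_node_revoke(&obj->vma_node, file_priv), so the rbtree entries in
struct drm_vma_offset_file are ordered by drm_file pointers rather than
struct file pointers.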