@@ -304,11 +304,8 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_vma_offset_destroy(&mm->vma_manager, &list->vma_offset);
- kfree(list->map);
- list->map = NULL;
+ drm_vma_offset_destroy(&mm->vma_manager, &obj->vma_offset);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
@@ -328,32 +325,10 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
int ret;
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- ret = drm_vma_offset_setup(&mm->vma_manager, &list->vma_offset,
+ ret = drm_vma_offset_setup(&mm->vma_manager, &obj->vma_offset,
obj->size / PAGE_SIZE);
- if (ret)
- goto out_free_list;
-
- return 0;
-
-out_free_list:
- kfree(list->map);
- list->map = NULL;
-
return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
@@ -642,10 +617,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
struct drm_gem_object *obj;
struct drm_vma_offset_node *offset_node;
- struct drm_map_list *list;
int ret = 0;
if (drm_device_is_unplugged(dev))
@@ -660,21 +633,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return drm_mmap(filp, vma);
}
- list = container_of(offset_node, struct drm_map_list, vma_offset);
- map = list->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
+ obj = container_of(offset_node, struct drm_gem_object, vma_offset);
/* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start) {
+ if (obj->size < vma->vm_end - vma->vm_start) {
ret = -EINVAL;
goto out_unlock;
}
- obj = map->handle;
if (!obj->dev->driver->gem_vm_ops) {
ret = -EINVAL;
goto out_unlock;
@@ -682,7 +648,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
- vma->vm_private_data = map->handle;
+ vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
@@ -29,7 +29,7 @@
static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
{
- return (unsigned int)drm_vma_node_offset_addr(&obj->map_list.vma_offset);
+ return (unsigned int)drm_vma_node_offset_addr(&obj->vma_offset);
}
static void drm_gem_cma_buf_destroy(struct drm_device *drm,
@@ -140,7 +140,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- if (drm_vma_node_is_allocated(&gem_obj->map_list.vma_offset))
+ if (drm_vma_node_is_allocated(&gem_obj->vma_offset))
drm_gem_free_mmap_offset(gem_obj);
drm_gem_object_release(gem_obj);
@@ -168,7 +168,7 @@ out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
- if (drm_vma_node_is_allocated(&obj->map_list.vma_offset))
+ if (drm_vma_node_is_allocated(&obj->vma_offset))
drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
@@ -704,13 +704,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- if (!drm_vma_node_is_allocated(&obj->map_list.vma_offset)) {
+ if (!drm_vma_node_is_allocated(&obj->vma_offset)) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
+ *offset = drm_vma_node_offset_addr(&obj->vma_offset);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -38,7 +38,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
/* Remove the list map if one is present */
- if (drm_vma_node_is_allocated(&obj->map_list.vma_offset))
+ if (drm_vma_node_is_allocated(&obj->vma_offset))
drm_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
@@ -81,13 +81,13 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* What validation is needed here ? */
/* Make it mmapable */
- if (!drm_vma_node_is_allocated(&obj->map_list.vma_offset)) {
+ if (!drm_vma_node_is_allocated(&obj->vma_offset)) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
/* GEM should really work out the hash offsets for us */
- *offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
+ *offset = drm_vma_node_offset_addr(&obj->vma_offset);
out:
drm_gem_object_unreference(obj);
unlock:
@@ -1426,7 +1426,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (obj->base.dev->dev_mapping)
unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)drm_vma_node_offset_addr(&obj->base.map_list.vma_offset),
+ (loff_t)drm_vma_node_offset_addr(&obj->base.vma_offset),
obj->base.size, 1);
obj->fault_mappable = false;
@@ -1514,7 +1514,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (drm_vma_node_is_allocated(&obj->base.map_list.vma_offset))
+ if (drm_vma_node_is_allocated(&obj->base.vma_offset))
return 0;
ret = drm_gem_create_mmap_offset(&obj->base);
@@ -1539,7 +1539,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- if (!drm_vma_node_is_allocated(&obj->base.map_list.vma_offset))
+ if (!drm_vma_node_is_allocated(&obj->base.vma_offset))
return;
drm_gem_free_mmap_offset(&obj->base);
@@ -1580,7 +1580,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
goto out;
- *offset = drm_vma_node_offset_addr(&obj->base.map_list.vma_offset);
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_offset);
out:
drm_gem_object_unreference(&obj->base);
@@ -223,7 +223,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->pages)
udl_gem_put_pages(obj);
- if (drm_vma_node_is_allocated(&gem_obj->map_list.vma_offset))
+ if (drm_vma_node_is_allocated(&gem_obj->vma_offset))
drm_gem_free_mmap_offset(gem_obj);
}
@@ -247,13 +247,13 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
goto out;
- if (!drm_vma_node_is_allocated(&gobj->base.map_list.vma_offset)) {
+ if (!drm_vma_node_is_allocated(&gobj->base.vma_offset)) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = drm_vma_node_offset_addr(&obj->map_list.vma_offset);
+ *offset = drm_vma_node_offset_addr(&obj->vma_offset);
out:
drm_gem_object_unreference(&gobj->base);
@@ -580,7 +580,6 @@ struct drm_map_list {
struct drm_local_map *map; /**< mapping */
uint64_t user_token;
struct drm_master *master;
- struct drm_vma_offset_node vma_offset;
};
/**
@@ -636,7 +635,7 @@ struct drm_gem_object {
struct file *filp;
/* Mapping info for this object */
- struct drm_map_list map_list;
+ struct drm_vma_offset_node vma_offset;
/**
* Size of the object, in bytes. Immutable over the object's