@@ -125,6 +125,26 @@ out_unlock:
}
EXPORT_SYMBOL(drm_vma_offset_setup);
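+/**
+ * drm_vma_unmap_mapping - unmap userspace mappings of an offset node
+ * @dev_mapping: address space of the device file, may be NULL
+ * @node: offset node whose range should be unmapped
+ *
+ * Zap all CPU-side mappings that were established against the mmap
+ * offset of @node. This is a no-op if @dev_mapping is NULL or if no
+ * offset has been allocated for @node, yet.
+ */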
+void drm_vma_unmap_mapping(struct address_space *dev_mapping,
+ struct drm_vma_offset_node *node)
+{
+ if (dev_mapping && drm_vma_node_is_allocated(node)) {
+ unmap_mapping_range(dev_mapping,
+ drm_vma_node_offset_addr(node),
+ (loff_t)node->num_pages << PAGE_SHIFT, 1);
+ }
+}
+EXPORT_SYMBOL(drm_vma_unmap_mapping);
+
int drm_vma_offset_man_init(struct drm_vma_offset_manager *man, uint64_t file_page_offset, uint64_t size)
{
int ret;
@@ -1424,11 +1424,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- if (obj->base.dev->dev_mapping)
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)drm_vma_node_offset_addr(&obj->base.vma_offset),
- obj->base.size, 1);
-
+ drm_vma_unmap_mapping(obj->base.dev->dev_mapping,
+ &obj->base.vma_offset);
obj->fault_mappable = false;
}
@@ -1597,12 +1597,8 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- if (drm_vma_node_is_allocated(&bo->vma_offset) && bdev->dev_mapping) {
- loff_t offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_offset);
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
- }
+ drm_vma_unmap_mapping(bdev->dev_mapping,
+ &bo->vma_offset);
ttm_mem_io_free_vm(bo);
}
@@ -35,6 +35,9 @@ void drm_vma_offset_destroy(struct drm_vma_offset_manager *man,
int drm_vma_offset_man_init(struct drm_vma_offset_manager *man, uint64_t file_page_offset, uint64_t size);
void drm_vma_offset_man_fini(struct drm_vma_offset_manager *man);
+void drm_vma_unmap_mapping(struct address_space *dev_mapping,
+ struct drm_vma_offset_node *node);
+
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
node->vm_node = NULL;