@@ -324,7 +324,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
}
astbo->bo.bdev = &ast->ttm.bdev;
- astbo->bo.bdev->dev_mapping = dev->dev_mapping;
+ astbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -359,7 +359,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
}
bochsbo->bo.bdev = &bochs->ttm.bdev;
- bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+ bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -329,7 +329,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
}
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
- cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
+ cirrusbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
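The ast, bochs and cirrus hunks above are mechanical: TTM keeps a per-device bdev->dev_mapping pointer and uses it to shoot down userspace CPU mappings when a buffer moves or is destroyed, so these drivers now simply point it at the shared anonymous mapping. For context, a rough sketch of the consumer on the TTM side (simplified from the ttm_bo.c of this era, illustrative rather than verbatim):

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* bdev->dev_mapping now points at dev->anon_inode->i_mapping, so
	 * this zaps the user PTEs of every open file of the DRM device. */
	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}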
@@ -83,8 +83,6 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_minor *minor;
int retcode;
int need_setup = 0;
- struct address_space *old_mapping;
- struct address_space *old_imapping;
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
@@ -93,16 +91,9 @@ int drm_open(struct inode *inode, struct file *filp)
dev = minor->dev;
if (!dev->open_count++)
need_setup = 1;
- mutex_lock(&dev->struct_mutex);
- old_imapping = inode->i_mapping;
- old_mapping = dev->dev_mapping;
- if (old_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
+
+ /* share address_space across all char-devs of a single device */
+ filp->f_mapping = dev->anon_inode->i_mapping;
retcode = drm_open_helper(inode, filp, minor);
if (retcode)
@@ -115,12 +106,6 @@ int drm_open(struct inode *inode, struct file *filp)
return 0;
err_undo:
- mutex_lock(&dev->struct_mutex);
- filp->f_mapping = old_imapping;
- inode->i_mapping = old_imapping;
- iput(container_of(dev->dev_mapping, struct inode, i_data));
- dev->dev_mapping = old_mapping;
- mutex_unlock(&dev->struct_mutex);
dev->open_count--;
drm_minor_release(minor);
return retcode;
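With the per-open ihold()/iput() dance gone, drm_open() only has to point filp->f_mapping at the device-lifetime mapping, and the error path no longer has anything to roll back. A hypothetical sketch of why that single assignment is enough (example_gem_mmap() and its WARN_ON() are illustrative, not part of this patch): every VMA a driver creates inherits the mapping through vma->vm_file->f_mapping, so the fault side and the unmap side always name the same address_space, regardless of which minor was opened.

static int example_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	/* True by construction after drm_open(): all char-devs of this
	 * device hand out the same address_space. */
	WARN_ON(filp->f_mapping != dev->anon_inode->i_mapping);

	return drm_gem_mmap(filp, vma);
}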
@@ -422,7 +407,6 @@ int drm_lastclose(struct drm_device * dev)
drm_legacy_dma_takedown(dev);
- dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
drm_legacy_dev_reinit(dev);
@@ -538,9 +522,6 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
- BUG_ON(dev->dev_mapping == NULL);
- iput(container_of(dev->dev_mapping, struct inode, i_data));
-
	/* drop the reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
@@ -573,6 +573,12 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
+ goto err_free;
+ }
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
if (ret)
@@ -616,6 +622,8 @@ err_minors:
drm_minor_free(dev, DRM_MINOR_LEGACY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
+err_free:
kfree(dev);
return NULL;
}
@@ -630,6 +638,7 @@ static void drm_dev_release(struct kref *ref)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
drm_minor_free(dev, DRM_MINOR_LEGACY);
drm_minor_free(dev, DRM_MINOR_RENDER);
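drm_fs_inode_new() and drm_fs_inode_free() are referenced above but not shown in this excerpt. A minimal sketch of how such helpers can be built, assuming a pinned pseudo-filesystem backs the anonymous inode (the names, magic number and error handling here are illustrative):

static struct vfsmount *drm_fs_mnt;
static int drm_fs_cnt;

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	/* A bare pseudo super block is enough to back anonymous inodes. */
	return mount_pseudo(fs_type, "drm:", NULL, NULL, 0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	/* Pin the backing super block; dropped again in drm_fs_inode_free(). */
	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0)
		return ERR_PTR(r);

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

Allocating the inode in drm_dev_alloc() and dropping it in drm_dev_release() ties the address_space to the lifetime of the drm_device itself, which is what lets drm_lastclose() and drm_release() stop managing it.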
@@ -1526,7 +1526,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
obj->fault_mappable = false;
}
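drm_vma_node_unmap() is the consumer of the mapping on the GEM side. Roughly, paraphrasing the drm_vma_manager.h helper of this era (slightly simplified, illustrative name):

static inline void example_vma_node_unmap(struct drm_vma_offset_node *node,
					  struct address_space *file_mapping)
{
	/* Punch the object's pages out of every VMA backed by the given
	 * address_space, starting at the object's fake mmap offset. */
	if (drm_vma_node_has_offset(node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}

Because i915 now always passes dev->anon_inode->i_mapping, the shoot-down reaches mappings made through any of the device's minors, including render nodes.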
@@ -324,7 +324,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
}
mgabo->bo.bdev = &mdev->ttm.bdev;
- mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
+ mgabo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -228,7 +228,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
+ drm->ttm.bdev.dev_mapping = drm->dev->anon_inode->i_mapping;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
@@ -153,24 +153,24 @@ static struct {
static void evict_entry(struct drm_gem_object *obj,
enum tiler_fmt fmt, struct usergart_entry *entry)
{
- if (obj->dev->dev_mapping) {
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int n = usergart[fmt].height;
- size_t size = PAGE_SIZE * n;
- loff_t off = mmap_offset(obj) +
- (entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
- if (m > 1) {
- int i;
- /* if stride > than PAGE_SIZE then sparse mapping: */
- for (i = n; i > 0; i--) {
- unmap_mapping_range(obj->dev->dev_mapping,
- off, PAGE_SIZE, 1);
- off += PAGE_SIZE * m;
- }
- } else {
- unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int n = usergart[fmt].height;
+ size_t size = PAGE_SIZE * n;
+ loff_t off = mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+ if (m > 1) {
+ int i;
+		/* if stride > PAGE_SIZE then sparse mapping: */
+ for (i = n; i > 0; i--) {
+ unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+ off, PAGE_SIZE, 1);
+ off += PAGE_SIZE * m;
}
+ } else {
+ unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+ off, size, 1);
}
entry->obj = NULL;
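The omapdrm hunk also drops the check that guarded against the mapping not existing yet; with the inode allocated in drm_dev_alloc() it always exists. For reference, a worked example of the sparse path with illustrative numbers:

/* If PAGE_SIZE is 4096, (omap_obj->width << fmt) is 8192 and
 * usergart[fmt].height is 8, then m = 1 + 8192/4096 = 3 and n = 8.  The
 * slot's stride is wider than one page, so the loop issues 8 single-page
 * unmap_mapping_range() calls spaced m = 3 pages apart instead of one
 * contiguous call covering size = 8 pages.
 */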
@@ -82,8 +82,7 @@ int qxl_bo_create(struct qxl_device *qdev,
enum ttm_bo_type type;
int r;
- if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
- qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+ qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping;
if (kernel)
type = ttm_bo_type_kernel;
else
@@ -518,8 +518,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
(unsigned)qdev->surfaceram_size / (1024 * 1024));
- if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
- qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+ qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping;
r = qxl_ttm_debugfs_init(qdev);
if (r) {
DRM_ERROR("Failed to init debugfs\n");
@@ -120,7 +120,7 @@ int radeon_bo_create(struct radeon_device *rdev,
size = ALIGN(size, PAGE_SIZE);
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping;
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -742,7 +742,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping;
r = radeon_ttm_debugfs_init(rdev);
if (r) {
@@ -969,7 +969,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
goto out_no_shman;
file_priv->driver_priv = vmw_fp;
- dev_priv->bdev.dev_mapping = dev->dev_mapping;
+ dev_priv->bdev.dev_mapping = dev->anon_inode->i_mapping;
return 0;
@@ -1091,11 +1091,11 @@ struct drm_device {
struct device *dev; /**< Device structure of bus-device */
struct drm_driver *driver; /**< DRM driver managing the device */
void *dev_private; /**< DRM driver private data */
- struct address_space *dev_mapping; /**< Private addr-space just for the device */
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
atomic_t unplugged; /**< Flag whether dev is dead */
+	struct inode *anon_inode;		/**< inode for the device's private address-space */
/*@} */
/** \name Locks */