@@ -35,11 +35,6 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
- /* Only valid if allocated via nouveau_gem_new() and iff you hold a
- * gem reference to it! For debugging, use gem.filp != NULL to test
- * whether it is valid. */
- struct drm_gem_object gem;
-
/* protect by the ttm reservation lock */
int pin_refcnt;
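
The standalone gem member can go away because, after this series, the GEM object is assumed to live inside the TTM buffer object itself and is reachable as nvbo->bo.base, which is the member path every hunk below switches to. A trimmed-down sketch of that assumed layout (stand-in types, not the real DRM/TTM definitions):

	/* Stand-in types only -- the real definitions live in the DRM/TTM headers. */
	struct drm_gem_object_sketch    { void *filp; void *resv; };
	struct ttm_buffer_object_sketch {
		struct drm_gem_object_sketch base;   /* embedded GEM object */
		void *resv;
	};
	struct nouveau_bo_sketch {
		struct ttm_buffer_object_sketch bo;  /* nvbo->gem becomes nvbo->bo.base */
		int pin_refcnt;
	};
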
@@ -10,7 +10,7 @@
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
- return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
+ return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
}
/* nouveau_gem.c */
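
nouveau_gem_object() still walks from the gem pointer back to the wrapping nouveau_bo with container_of(); only the member path changes, from gem to the nested bo.base. A self-contained sketch of that pointer walk (simplified stand-in types, not the kernel headers):

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel's container_of(), minus the type checking. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gem_sketch  { int dummy; };
	struct ttm_sketch  { struct gem_sketch base; };
	struct nvbo_sketch { struct ttm_sketch bo; int pin_refcnt; };

	int main(void)
	{
		struct nvbo_sketch nvbo = { .pin_refcnt = 3 };
		struct gem_sketch *gem = &nvbo.bo.base;

		/* Walk back from the embedded member to the wrapper, which is
		 * what nouveau_gem_object() does with bo.base after this change. */
		struct nvbo_sketch *back = container_of(gem, struct nvbo_sketch, bo.base);
		printf("pin_refcnt = %d\n", back->pin_refcnt);   /* prints 3 */
		return 0;
	}
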
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_put_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->bo.base);
}
if (chan->heap.block_size)
@@ -339,7 +339,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
}
- ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
+ ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
&init->notifier_handle);
if (ret)
goto done;
@@ -136,7 +136,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem.filp))
+ if (unlikely(nvbo->bo.base.filp))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -299,7 +299,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
type, &nvbo->placement,
align >> PAGE_SHIFT, false, acc_size, sg,
robj, nouveau_bo_del_ttm);
- nvbo->gem.resv = nvbo->bo.resv;
+ nvbo->bo.base.resv = nvbo->bo.resv;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
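
Only the left-hand side of the assignment changes; the intent appears to be the same as before: the GEM object and the TTM buffer object share one reservation object, so locking or fencing through either interface reaches the same structure. A minimal sketch of that sharing (toy types, not the DRM/TTM API):

	/* Toy types; only the pointer sharing matters here. */
	struct resv_sketch { int fences; };
	struct gem_side    { struct resv_sketch *resv; };
	struct bo_side     { struct resv_sketch *resv; struct gem_side base; };

	static void share_resv(struct bo_side *bo)
	{
		/* Mirrors "nvbo->bo.base.resv = nvbo->bo.resv" above: afterwards
		 * bo->base.resv and bo->resv name the same reservation object. */
		bo->base.resv = bo->resv;
	}
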
@@ -1402,7 +1402,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+ return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
filp->private_data);
}
@@ -201,7 +201,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -214,7 +214,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -660,8 +660,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_put_unlocked(&bo->gem);
+ ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->bo.base);
return ret;
}
@@ -205,13 +205,13 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
- nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
+ nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
return 0;
}
@@ -268,15 +268,16 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
+ ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
+ &req->info.handle);
if (ret == 0) {
- ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
+ ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
return ret;
}
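
The handle/reference dance here is untouched by the rename: drm_gem_handle_create() makes the userspace handle hold its own reference, so the reference taken at allocation time is dropped once the handle exists. A toy sketch of that ownership handoff (the pattern only, not the drm_gem_* API):

	#include <assert.h>

	struct obj_sketch { int refs; };

	static void obj_get(struct obj_sketch *o) { o->refs++; }
	static void obj_put(struct obj_sketch *o) { o->refs--; }  /* 0 == freed */

	/* A "handle" simply pins the object with a reference of its own. */
	static void handle_create(struct obj_sketch *o) { obj_get(o); }

	int main(void)
	{
		struct obj_sketch o = { .refs = 1 };  /* reference from allocation   */

		handle_create(&o);                    /* handle now owns a reference */
		obj_put(&o);                          /* drop the allocation ref     */

		assert(o.refs == 1);                  /* object lives on via handle  */
		return 0;
	}
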
@@ -355,7 +356,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
}
}
@@ -493,7 +494,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
+ ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
@@ -79,13 +79,13 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ERR_PTR(-ENOMEM);
}
- return &nvbo->gem;
+ return &nvbo->bo.base;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)