@@ -61,6 +61,56 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
+/*
+ * Global memory.
+ */
+
+/**
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
+ *
+ * @adev: AMDGPU device for which the global structures need to be registered.
+ *
+ * This is called once from amdgpu_ttm_init() during device bring-up.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ /* ensure mem_global_referenced reads false if init fails partway */
+ adev->mman.mem_global_referenced = false;
+
+ global_ref = &adev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+
+ mutex_init(&adev->mman.gtt_window_lock);
+
+ adev->mman.mem_global_referenced = true;
+
+ return 0;
+}
+
+static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
+{
+ if (adev->mman.mem_global_referenced) {
+ mutex_destroy(&adev->mman.gtt_window_lock);
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+ adev->mman.mem_global_referenced = false;
+ }
+}
+
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
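
For context: every driver wrapper in this revert leans on the drm_global machinery, a small refcounted registry of singletons keyed by global_type. Roughly, drm_global_item_ref() behaves as in the sketch below (reconstructed from memory of that era's drivers/gpu/drm/drm_global.c; treat the details as assumptions, not verbatim source):

    /* Approximate semantics of drm_global_item_ref(); not verbatim. */
    int drm_global_item_ref(struct drm_global_reference *ref)
    {
        struct drm_global_item *item = &glob[ref->global_type];
        int ret = 0;

        mutex_lock(&item->mutex);
        if (item->refcount == 0) {
            /* First user: allocate the singleton and run the init
             * callback (here, ttm_bo_global_ref_init). */
            item->object = kzalloc(ref->size, GFP_KERNEL);
            if (item->object == NULL) {
                ret = -ENOMEM;
                goto out;
            }
            ref->object = item->object;
            ret = ref->init(ref);
            if (ret != 0) {
                kfree(item->object);
                item->object = NULL;
                goto out;
            }
        }
        item->refcount++;
        ref->object = item->object;
    out:
        mutex_unlock(&item->mutex);
        return ret;
    }

The matching drm_global_item_unref() decrements the count and, on the last reference, runs ref->release and frees the object; that is what makes the per-driver init/fini pairs below safe to nest across devices.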
@@ -1664,10 +1714,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
int r;
u64 vis_vram_limit;
- mutex_init(&adev->mman.gtt_window_lock);
-
+ /* initialize global references for vram/gtt */
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
+ adev->mman.bo_global_ref.ref.object,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -1824,6 +1878,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
+ amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -39,6 +39,7 @@
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
struct amdgpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
@@ -104,6 +104,7 @@ struct ast_private {
int fb_mtrr;
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
@@ -36,6 +36,35 @@ ast_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct ast_private, ttm.bdev);
}
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+ return 0;
+}
+
+static void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ ast->ttm.bo_global_ref.ref.release = NULL;
+}
+
static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct ast_bo *bo;
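
A note on the teardown guard used here (and repeated in the bochs, cirrus, hibmc and mgag200 hunks below): instead of a separate mem_global_referenced bool as in amdgpu, these drivers clear ref.release after unreferencing, which makes a repeated release call a no-op. A minimal illustration with a hypothetical helper, not taken from the patch:

    /* Hypothetical sketch of the guard idiom; not part of this patch. */
    static void example_global_release(struct drm_global_reference *ref)
    {
        if (ref->release == NULL)   /* never referenced, or already released */
            return;
        drm_global_item_unref(ref);
        ref->release = NULL;        /* a second call becomes a no-op */
    }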
@@ -175,7 +204,12 @@ int ast_mm_init(struct ast_private *ast)
struct drm_device *dev = ast->dev;
struct ttm_bo_device *bdev = &ast->ttm.bdev;
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
&ast_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -206,6 +240,8 @@ void ast_mm_fini(struct ast_private *ast)
ttm_bo_device_release(&ast->ttm.bdev);
+ ast_ttm_global_release(ast);
+
arch_phys_wc_del(ast->fb_mtrr);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -77,6 +77,7 @@ struct bochs_device {
/* ttm */
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
} ttm;
@@ -16,6 +16,35 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct bochs_device, ttm.bdev);
}
+static int bochs_ttm_global_init(struct bochs_device *bochs)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &bochs->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void bochs_ttm_global_release(struct bochs_device *bochs)
+{
+ if (bochs->ttm.bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
+ bochs->ttm.bo_global_ref.ref.release = NULL;
+}
+
static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct bochs_bo *bo;
@@ -153,7 +182,12 @@ int bochs_mm_init(struct bochs_device *bochs)
struct ttm_bo_device *bdev = &bochs->ttm.bdev;
int ret;
+ ret = bochs_ttm_global_init(bochs);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&bochs->ttm.bdev,
+ bochs->ttm.bo_global_ref.ref.object,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -180,6 +214,7 @@ void bochs_mm_fini(struct bochs_device *bochs)
return;
ttm_bo_device_release(&bochs->ttm.bdev);
+ bochs_ttm_global_release(bochs);
bochs->ttm.initialized = false;
}
@@ -136,6 +136,7 @@ struct cirrus_device {
int fb_mtrr;
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
bool mm_inited;
@@ -36,6 +36,35 @@ cirrus_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct cirrus_device, ttm.bdev);
}
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+ return 0;
+}
+
+static void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ cirrus->ttm.bo_global_ref.ref.release = NULL;
+}
+
static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct cirrus_bo *bo;
@@ -175,7 +204,12 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
struct drm_device *dev = cirrus->dev;
struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -211,6 +245,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
ttm_bo_device_release(&cirrus->ttm.bdev);
+ cirrus_ttm_global_release(cirrus);
+
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
@@ -49,6 +49,7 @@ struct hibmc_drm_private {
bool mode_config_initialized;
/* ttm */
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
@@ -29,6 +29,32 @@ hibmc_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct hibmc_drm_private, bdev);
}
+static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
+{
+ int ret;
+
+ hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
+ hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
+ hibmc->bo_global_ref.ref.init = &ttm_bo_global_ref_init;
+ hibmc->bo_global_ref.ref.release = &ttm_bo_global_ref_release;
+ ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
+ if (ret) {
+ DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void
+hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
+{
+ if (hibmc->bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&hibmc->bo_global_ref.ref);
+ hibmc->bo_global_ref.ref.release = NULL;
+}
+
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -188,12 +214,18 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
struct drm_device *dev = hibmc->dev;
struct ttm_bo_device *bdev = &hibmc->bdev;
+ ret = hibmc_ttm_global_init(hibmc);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&hibmc->bdev,
+ hibmc->bo_global_ref.ref.object,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
+ hibmc_ttm_global_release(hibmc);
DRM_ERROR("error initializing bo driver: %d\n", ret);
return ret;
}
@@ -201,6 +233,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
hibmc->fb_size >> PAGE_SHIFT);
if (ret) {
+ hibmc_ttm_global_release(hibmc);
DRM_ERROR("failed ttm VRAM init: %d\n", ret);
return ret;
}
@@ -215,6 +248,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
return;
ttm_bo_device_release(&hibmc->bdev);
+ hibmc_ttm_global_release(hibmc);
hibmc->mm_inited = false;
}
@@ -212,6 +212,7 @@ struct mga_device {
int fb_mtrr;
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
@@ -36,6 +36,35 @@ mgag200_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct mga_device, ttm.bdev);
}
+static int mgag200_ttm_global_init(struct mga_device *mdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &mdev->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+ return 0;
+}
+
+static void
+mgag200_ttm_global_release(struct mga_device *mdev)
+{
+ if (mdev->ttm.bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
+ mdev->ttm.bo_global_ref.ref.release = NULL;
+}
+
static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct mgag200_bo *bo;
@@ -175,7 +204,12 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -206,6 +240,8 @@ void mgag200_mm_fini(struct mga_device *mdev)
ttm_bo_device_release(&mdev->ttm.bdev);
+ mgag200_ttm_global_release(mdev);
+
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
@@ -146,6 +146,7 @@ struct nouveau_drm {
/* TTM interface support */
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
@@ -174,6 +174,38 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
+int
+nouveau_ttm_global_init(struct nouveau_drm *drm)
+{
+ struct drm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &drm->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+
+ ret = drm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM BO subsystem\n");
+ drm->ttm.bo_global_ref.ref.release = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_ttm_global_release(struct nouveau_drm *drm)
+{
+ if (drm->ttm.bo_global_ref.ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
+ drm->ttm.bo_global_ref.ref.release = NULL;
+}
+
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
@@ -236,7 +268,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
+ ret = nouveau_ttm_global_init(drm);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&drm->ttm.bdev,
+ drm->ttm.bo_global_ref.ref.object,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -291,6 +328,8 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
ttm_bo_device_release(&drm->ttm.bdev);
+ nouveau_ttm_global_release(drm);
+
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
@@ -126,6 +126,8 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ bool mem_global_referenced;
struct ttm_bo_device bdev;
};
@@ -46,6 +46,34 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &qdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+
+ qdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+ if (qdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+ qdev->mman.mem_global_referenced = false;
+ }
+}
+
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
@@ -316,8 +344,12 @@ int qxl_ttm_init(struct qxl_device *qdev)
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+ r = qxl_ttm_global_init(qdev);
+ if (r)
+ return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
+ qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -353,6 +385,7 @@ void qxl_ttm_fini(struct qxl_device *qdev)
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
+ qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
}
@@ -448,7 +448,9 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
+ bool mem_global_referenced;
bool initialized;
#if defined(CONFIG_DEBUG_FS)
@@ -60,6 +60,39 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ rdev->mman.mem_global_referenced = false;
+ global_ref = &rdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+
+ rdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+ if (rdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+ rdev->mman.mem_global_referenced = false;
+ }
+}
+
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -788,8 +821,13 @@ int radeon_ttm_init(struct radeon_device *rdev)
{
int r;
+ r = radeon_ttm_global_init(rdev);
+ if (r)
+ return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
+ rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -861,6 +899,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
+ radeon_ttm_global_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
@@ -1530,7 +1530,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
__free_page(glob->dummy_read_page);
}
-static void ttm_bo_global_release(void)
+void ttm_bo_global_release(void)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
@@ -1544,8 +1544,9 @@ static void ttm_bo_global_release(void)
out:
mutex_unlock(&ttm_global_mutex);
}
+EXPORT_SYMBOL(ttm_bo_global_release);
-static int ttm_bo_global_init(void)
+int ttm_bo_global_init(void)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
@@ -1582,6 +1583,8 @@ static int ttm_bo_global_init(void)
mutex_unlock(&ttm_global_mutex);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
@@ -1620,25 +1623,18 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
drm_vma_offset_manager_destroy(&bdev->vma_manager);
- if (!ret)
- ttm_bo_global_release();
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
- struct ttm_bo_global *glob = &ttm_bo_glob;
- int ret;
-
- ret = ttm_bo_global_init();
- if (ret)
- return ret;
+ int ret = -EINVAL;
bdev->driver = driver;
@@ -1665,7 +1661,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
return 0;
out_no_sys:
- ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
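
With the glob parameter back in ttm_bo_device_init() and ttm_bo_global_init()/ttm_bo_global_release() exported, the ownership contract reverts to: the driver takes the global reference itself, holds it for the lifetime of its bo_device, and must drop it on every error path, since device init/release no longer do. A sketch of a caller under that contract (struct my_device and my_bo_driver are placeholders, not from the patch):

    /* Minimal sketch, assuming placeholder driver types. */
    struct my_device {
        struct ttm_bo_global_ref bo_global_ref;
        struct ttm_bo_device bdev;
    };

    static int my_mm_init(struct my_device *mdev, struct address_space *mapping)
    {
        struct drm_global_reference *ref = &mdev->bo_global_ref.ref;
        int ret;

        ref->global_type = DRM_GLOBAL_TTM_BO;
        ref->size = sizeof(struct ttm_bo_global);
        ref->init = &ttm_bo_global_ref_init;
        ref->release = &ttm_bo_global_ref_release;
        ret = drm_global_item_ref(ref);
        if (ret)
            return ret;

        ret = ttm_bo_device_init(&mdev->bdev, ref->object, &my_bo_driver,
                                 mapping, DRM_FILE_PAGE_OFFSET, true);
        if (ret)
            drm_global_item_unref(ref); /* device init no longer cleans up */
        return ret;
    }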
@@ -145,6 +145,8 @@ struct virtio_gpu_fbdev {
};
struct virtio_gpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ bool mem_global_referenced;
struct ttm_bo_device bdev;
};
@@ -50,6 +50,35 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
return vgdev;
}
+static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ vgdev->mman.mem_global_referenced = false;
+ global_ref = &vgdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return r;
+ }
+
+ vgdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
+{
+ if (vgdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
+ vgdev->mman.mem_global_referenced = false;
+ }
+}
+
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@@ -326,8 +355,12 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
int r;
+ r = virtio_gpu_ttm_global_init(vgdev);
+ if (r)
+ return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&vgdev->mman.bdev,
+ vgdev->mman.bo_global_ref.ref.object,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -346,11 +379,13 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
err_mm_init:
ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
+ virtio_gpu_ttm_global_fini(vgdev);
return r;
}
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
ttm_bo_device_release(&vgdev->mman.bdev);
+ virtio_gpu_ttm_global_fini(vgdev);
DRM_INFO("virtio_gpu: ttm finalized\n");
}
@@ -785,6 +785,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+ ret = vmw_ttm_global_init(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
vmw_master_init(&dev_priv->fbdev_master);
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -795,7 +800,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err0;
+ goto out_err3;
}
/* Need mmio memory to check for fifo pitchlock cap. */
@@ -849,6 +854,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
VMWGFX_FILE_PAGE_OFFSET,
@@ -970,6 +976,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_object_device_release(&dev_priv->tdev);
out_err4:
memunmap(dev_priv->mmio_virt);
+out_err3:
+ vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1021,6 +1029,7 @@ static void vmw_driver_unload(struct drm_device *dev)
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
+ vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
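
The reshuffled vmwgfx labels follow the usual kernel unwind pattern: resources are released in reverse order of acquisition, so the new out_err3 slots in between MMIO unmapping (out_err4) and the plain-return path (out_err0). Schematically (example_map_mmio is a hypothetical helper; only the ordering matters):

    static int example_load(struct vmw_private *dev_priv)
    {
        int ret;

        ret = vmw_ttm_global_init(dev_priv);  /* acquired first */
        if (ret)
            goto out_err0;                    /* nothing to unwind yet */

        ret = example_map_mmio(dev_priv);     /* hypothetical second step */
        if (ret)
            goto out_err3;                    /* undo only the global ref */

        return 0;

    out_err3:
        vmw_ttm_global_release(dev_priv);     /* last-acquired, released first */
    out_err0:
        return ret;
    }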
@@ -417,6 +417,7 @@ enum {
struct vmw_private {
struct ttm_bo_device bdev;
+ struct ttm_bo_global_ref bo_global_ref;
struct vmw_fifo_state fifo;
@@ -832,6 +833,8 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
* TTM glue - vmwgfx_ttm_glue.c
*/
+extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
+extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
@@ -78,3 +78,30 @@ void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
vvm->unreserve_mem = vmw_vmt_unreserve;
vvm->gran = gran;
}
+
+int vmw_ttm_global_init(struct vmw_private *dev_priv)
+{
+ struct drm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &dev_priv->bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+ ret = drm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM buffer objects.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void vmw_ttm_global_release(struct vmw_private *dev_priv)
+{
+ drm_global_item_unref(&dev_priv->bo_global_ref.ref);
+}
@@ -79,6 +79,7 @@ struct vbox_private {
int fb_mtrr;
struct {
+ struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
@@ -16,6 +16,37 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct vbox_private, ttm.bdev);
}
+/*
+ * Adds the vbox memory manager object/structures to the global memory manager.
+ */
+static int vbox_ttm_global_init(struct vbox_private *vbox)
+{
+ struct drm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &vbox->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_ref_init;
+ global_ref->release = &ttm_bo_global_ref_release;
+
+ ret = drm_global_item_ref(global_ref);
+ if (ret) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Removes the vbox memory manager object from the global memory manager.
+ */
+static void vbox_ttm_global_release(struct vbox_private *vbox)
+{
+ drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
+}
+
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct vbox_bo *bo;
@@ -153,13 +184,18 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+ ret = vbox_ttm_global_init(vbox);
+ if (ret)
+ return ret;
+
ret = ttm_bo_device_init(&vbox->ttm.bdev,
+ vbox->ttm.bo_global_ref.ref.object,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
- return ret;
+ goto err_ttm_global_release;
}
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -181,6 +217,8 @@ int vbox_mm_init(struct vbox_private *vbox)
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
+err_ttm_global_release:
+ vbox_ttm_global_release(vbox);
return ret;
}
@@ -194,6 +232,7 @@ void vbox_mm_fini(struct vbox_private *vbox)
arch_phys_wc_del(vbox->fb_mtrr);
#endif
ttm_bo_device_release(&vbox->ttm.bdev);
+ vbox_ttm_global_release(vbox);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
@@ -569,6 +569,9 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+void ttm_bo_global_release(void);
+int ttm_bo_global_init(void);
+
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -586,7 +589,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
+int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
@@ -885,4 +888,40 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+struct ttm_bo_global_ref {
+ struct drm_global_reference ref;
+};
+
+/**
+ * ttm_bo_global_ref_init
+ *
+ * @ref: DRM global reference
+ *
+ * Helper function that initializes a struct ttm_bo_global. This function
+ * is used as the init callback for DRM global references of type
+ * DRM_GLOBAL_TTM_BO.
+ */
+static inline int ttm_bo_global_ref_init(struct drm_global_reference *ref)
+{
+ return ttm_bo_global_init();
+}
+
+/**
+ * ttm_bo_global_ref_release
+ *
+ * @ref: DRM global reference
+ *
+ * Helper function that releases a struct ttm_bo_global. This function
+ * is used as the release callback for DRM global references of type
+ * DRM_GLOBAL_TTM_BO.
+ */
+static inline void ttm_bo_global_ref_release(struct drm_global_reference *ref)
+{
+ ttm_bo_global_release();
+}
+
#endif
This reverts commit a64f784bb14a56bfdfad2dc397dd67e4564e3a29.

Signed-off-by: Karol Herbst <kherbst@redhat.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 59 ++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  1 +
 drivers/gpu/drm/ast/ast_drv.h               |  1 +
 drivers/gpu/drm/ast/ast_ttm.c               | 36 +++++++++++
 drivers/gpu/drm/bochs/bochs.h               |  1 +
 drivers/gpu/drm/bochs/bochs_mm.c            | 35 +++++++++++
 drivers/gpu/drm/cirrus/cirrus_drv.h         |  1 +
 drivers/gpu/drm/cirrus/cirrus_ttm.c         | 36 +++++++++++
 .../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h |  1 +
 drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 34 +++++++++++
 drivers/gpu/drm/mgag200/mgag200_drv.h       |  1 +
 drivers/gpu/drm/mgag200/mgag200_ttm.c       | 36 +++++++++++
 drivers/gpu/drm/nouveau/nouveau_drv.h       |  1 +
 drivers/gpu/drm/nouveau/nouveau_ttm.c       | 39 ++++++++++++
 drivers/gpu/drm/qxl/qxl_drv.h               |  2 +
 drivers/gpu/drm/qxl/qxl_ttm.c               | 33 +++++++++++
 drivers/gpu/drm/radeon/radeon.h             |  2 +
 drivers/gpu/drm/radeon/radeon_ttm.c         | 39 ++++++++++++
 drivers/gpu/drm/ttm/ttm_bo.c                | 19 +++---
 drivers/gpu/drm/virtio/virtgpu_drv.h        |  2 +
 drivers/gpu/drm/virtio/virtgpu_ttm.c        | 35 +++++++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         | 11 +++-
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |  3 +
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c    | 27 +++++++++
 drivers/staging/vboxvideo/vbox_drv.h        |  1 +
 drivers/staging/vboxvideo/vbox_ttm.c        | 41 ++++++++++++-
 include/drm/ttm/ttm_bo_driver.h             | 41 ++++++++++++-
 27 files changed, 521 insertions(+), 17 deletions(-)
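
One teardown-order note that holds across every driver touched above: the bo_device is always released before the global reference is dropped, because the device points into the shared ttm_bo_global. In miniature, reusing the placeholder my_device from the earlier sketch:

    static void my_mm_fini(struct my_device *mdev)
    {
        /* release the device while the global it points into is alive */
        ttm_bo_device_release(&mdev->bdev);
        /* then drop our reference on the shared global */
        drm_global_item_unref(&mdev->bo_global_ref.ref);
    }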