@@ -918,7 +918,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count++;
if (max_offset != 0) {
- u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
+ u64 domain_start = amdgpu_ttm_domain_start(adev,
+ mem_type);
WARN_ON_ONCE(max_offset <
(amdgpu_bo_gpu_offset(bo) - domain_start));
}
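
The check above is now expressed against the start of the BO's memory domain instead of the TTM manager's gpu_offset field. A minimal userspace model of the arithmetic behind that WARN_ON_ONCE() (the PAGE_SHIFT value and sample addresses are assumptions for the demo, not values taken from the patch):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

int main(void)
{
	/* Made-up numbers: BO at page 256 of a domain based at 256 MiB. */
	uint64_t domain_start = 256ULL << 20;		/* e.g. vram_start */
	uint64_t bo_gpu_offset = domain_start + (256ULL << PAGE_SHIFT);
	uint64_t max_offset = 2ULL << 20;		/* caller's pin limit */

	/* Mirrors WARN_ON_ONCE(max_offset < (gpu_offset - domain_start)):
	 * the limit is relative to the domain start, not to address zero. */
	assert(!(max_offset < bo_gpu_offset - domain_start));
	return 0;
}
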
@@ -1484,7 +1485,25 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
- return amdgpu_gmc_sign_extend(bo->tbo.offset);
+ return amdgpu_bo_gpu_offset_no_check(bo);
+}
+
+/**
+ * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns:
+ * current GPU offset of the object without raising warnings.
+ */
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset;
+
+ offset = (bo->tbo.mem.start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+
+ return amdgpu_gmc_sign_extend(offset);
}

/**
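
The new helper recomputes the address from the current placement (page index shifted up, plus the domain's GPU base) and then sign-extends it, just as the old bo->tbo.offset consumers did at their call sites. A standalone sketch of what amdgpu_gmc_sign_extend() does, assuming the AMDGPU_GMC_HOLE_* values from amdgpu_gmc.h at the time of this patch:

#include <assert.h>
#include <stdint.h>

/* MC address "hole" bounds, as defined in amdgpu_gmc.h (assumption). */
#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL

/* Same shape as amdgpu_gmc_sign_extend(): addresses at or above the
 * hole get their top bits set so they form canonical MC addresses. */
static uint64_t sign_extend(uint64_t addr)
{
	if (addr >= AMDGPU_GMC_HOLE_START)
		addr |= AMDGPU_GMC_HOLE_END;
	return addr;
}

int main(void)
{
	/* Below the hole: address passes through unchanged. */
	assert(sign_extend(0x1000) == 0x1000);
	/* At or above the hole: high bits are filled in. */
	assert(sign_extend(0x0000800000002000ULL) == 0xffff800000002000ULL);
	return 0;
}
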
@@ -282,6 +282,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
@@ -96,7 +96,6 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_TT:
/* GTT memory */
man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->gmc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -104,7 +103,6 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &amdgpu_vram_mgr_func;
- man->gpu_offset = adev->gmc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
@@ -115,7 +113,6 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case AMDGPU_PL_OA:
/* On-chip GDS memory */
man->func = &ttm_bo_manager_func;
- man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
@@ -263,7 +260,8 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
addr = mm_node->start << PAGE_SHIFT;
- addr += bo->bdev->man[mem->mem_type].gpu_offset;
+ addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
+ mem->mem_type);
}
return addr;
}
@@ -750,6 +748,26 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
(offset >> PAGE_SHIFT);
}
+/**
+ * amdgpu_ttm_domain_start - Returns GPU start address
+ * @adev: amdgpu device object
+ * @type: type of the memory
+ *
+ * Returns:
+ * GPU start address of a memory domain
+ */
+uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
+{
+ switch (type) {
+ case TTM_PL_TT:
+ return adev->gmc.gart_start;
+ case TTM_PL_VRAM:
+ return adev->gmc.vram_start;
+ }
+
+ return 0;
+}
+
/*
* TTM backend functions.
*/
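
With the three per-manager gpu_offset assignments removed above, the domain base now lives in this one switch. A runnable model of how a GPU address is derived from a placement after this change (the pool ids and domain bases are stand-ins, not the real TTM_PL_* values or adev->gmc fields):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Stand-ins for adev->gmc.gart_start / vram_start; the real values
 * come from GMC setup, these are made up for the demo. */
static const uint64_t gart_start = 0x0000800000000000ULL;
static const uint64_t vram_start = 0x0;

enum { PL_TT = 1, PL_VRAM = 2 };	/* models TTM_PL_TT / TTM_PL_VRAM */

/* Shaped like amdgpu_ttm_domain_start(). */
static uint64_t domain_start(uint32_t type)
{
	switch (type) {
	case PL_TT:
		return gart_start;
	case PL_VRAM:
		return vram_start;
	}
	return 0;	/* other pools have no fixed GPU base */
}

int main(void)
{
	uint64_t page = 42;	/* bo->tbo.mem.start: page index in the domain */

	printf("GTT  addr: 0x%llx\n",
	       (unsigned long long)((page << PAGE_SHIFT) + domain_start(PL_TT)));
	printf("VRAM addr: 0x%llx\n",
	       (unsigned long long)((page << PAGE_SHIFT) + domain_start(PL_VRAM)));
	return 0;
}
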
@@ -1163,9 +1182,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
bo->mem = tmp;
}
- bo->offset = (bo->mem.start << PAGE_SHIFT) +
- bo->bdev->man[bo->mem.mem_type].gpu_offset;
-
return 0;
}
@@ -112,6 +112,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
@@ -141,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -168,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
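
Note for these two call sites: the explicit amdgpu_gmc_sign_extend(bo->tbo.offset) disappears because amdgpu_bo_gpu_offset_no_check() already sign-extends internally (see the new helper above), so the resulting page-table base address is unchanged.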