--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -404,7 +404,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- &(*bo_ptr)->tbo.resource, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx, false);
if (r)
goto error;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -967,7 +967,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
placements.mem_type = TTM_PL_TT;
placements.flags = bo->resource->placement;
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx, true);
if (unlikely(r))
return r;
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -414,7 +414,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
hop_placement.placement = hop;
/* find space in the bounce domain */
- ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
+ ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx, true);
if (ret)
return ret;
/* move to the bounce domain */
@@ -454,7 +454,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
return ttm_bo_pipeline_gutting(bo);
}
- ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx, true);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -724,37 +724,6 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
-/*
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
- struct ww_acquire_ctx *ticket;
- int ret;
-
- man = ttm_manager_type(bdev, place->mem_type);
- ticket = dma_resv_locking_ctx(bo->base.resv);
- do {
- ret = ttm_resource_alloc(bo, place, mem);
- if (likely(!ret))
- break;
- if (unlikely(ret != -ENOSPC))
- return ret;
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret != 0))
- return ret;
- } while (1);
-
- return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
-}
-
/**
* ttm_bo_mem_space
*
@@ -763,6 +732,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
* @placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_resource.
* @ctx: if and how to sleep, lock buffers and alloc memory
+ * @force_space: if we should evict buffers to force space
*
* Allocate memory space for the buffer object pointed to by @bo, using
* the placement flags in @placement, potentially evicting other idle buffer objects.
@@ -776,12 +746,14 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx)
+ struct ttm_operation_ctx *ctx,
+ bool force_space)
{
struct ttm_device *bdev = bo->bdev;
- bool type_found = false;
+ struct ww_acquire_ctx *ticket;
int i, ret;
+ ticket = dma_resv_locking_ctx(bo->base.resv);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -790,19 +762,30 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- if (place->flags & TTM_PL_FLAG_BUSY)
- continue;
-
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
continue;
- type_found = true;
- ret = ttm_resource_alloc(bo, place, mem);
- if (ret == -ENOSPC)
+ if (place->flags & (force_space ? TTM_PL_FLAG_IDLE :
+ TTM_PL_FLAG_BUSY))
+ continue;
+
+ do {
+ ret = ttm_resource_alloc(bo, place, mem);
+ if (unlikely(ret && ret != -ENOSPC))
+ return ret;
+ if (likely(!ret) || !force_space)
+ break;
+
+ ret = ttm_mem_evict_first(bdev, man, place, ctx,
+ ticket);
+ if (unlikely(ret == -EBUSY))
+ break;
+ if (unlikely(ret))
+ return ret;
+ } while (1);
+ if (ret)
continue;
- if (unlikely(ret))
- goto error;
ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -810,45 +793,19 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret == -EBUSY)
continue;
- goto error;
+ return ret;
}
return 0;
}
- for (i = 0; i < placement->num_placement; ++i) {
- const struct ttm_place *place = &placement->placement[i];
- struct ttm_resource_manager *man;
-
- if (place->flags & TTM_PL_FLAG_IDLE)
- continue;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- continue;
-
- type_found = true;
- ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
- if (likely(!ret))
- return 0;
-
- if (ret && ret != -EBUSY)
- goto error;
- }
-
- ret = -ENOSPC;
- if (!type_found) {
- pr_err(TTM_PFX "No compatible memory type found\n");
- ret = -EINVAL;
- }
-
-error:
- return ret;
+ return -ENOSPC;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx)
+ struct ttm_operation_ctx *ctx,
+ bool force_space)
{
struct ttm_resource *mem;
struct ttm_place hop;
@@ -865,7 +822,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* stop and the driver will be called to make
* the second hop.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
+ ret = ttm_bo_mem_space(bo, placement, &mem, ctx, force_space);
if (ret)
return ret;
bounce:
@@ -902,6 +859,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ bool force_space;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -912,20 +870,27 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement)
return ttm_bo_pipeline_gutting(bo);
- /* Check whether we need to move buffer. */
- if (bo->resource && ttm_resource_compatible(bo->resource, placement))
- return 0;
+ force_space = false;
+ do {
+ /* Check whether we need to move buffer. */
+ if (bo->resource &&
+ ttm_resource_compatible(bo->resource, placement,
+ force_space))
+ return 0;
+
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
- /* Moving of pinned BOs is forbidden */
- if (bo->pin_count)
- return -EINVAL;
+ ret = ttm_bo_move_buffer(bo, placement, ctx, force_space);
+ if (ret && ret != -ENOSPC)
+ return ret;
- ret = ttm_bo_move_buffer(bo, placement, ctx);
+ force_space = !force_space;
+ } while (force_space);
/* For backward compatibility with userspace */
if (ret == -ENOSPC)
return -ENOMEM;
- if (ret)
- return ret;
/*
* We might need to add a TTM.
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -295,11 +295,13 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
*
* @res: the resource to check
* @placement: the placement to check against
+ * @busy: controls which places to check
*
* Returns true if the placement is compatible.
*/
bool ttm_resource_compatible(struct ttm_resource *res,
- struct ttm_placement *placement)
+ struct ttm_placement *placement,
+ bool busy)
{
struct ttm_buffer_object *bo = res->bo;
struct ttm_device *bdev = bo->bdev;
@@ -315,14 +317,19 @@ bool ttm_resource_compatible(struct ttm_resource *res,
if (res->mem_type != place->mem_type)
continue;
+ if (place->flags & (busy ? TTM_PL_FLAG_IDLE : TTM_PL_FLAG_BUSY))
+ continue;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
+ !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
+ continue;
+
man = ttm_manager_type(bdev, res->mem_type);
if (man->func->compatible &&
!man->func->compatible(man, res, place, bo->base.size))
continue;
- if ((!(place->flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
+ return true;
}
return false;
}
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -397,7 +397,8 @@ vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx);
+ struct ttm_operation_ctx *ctx,
+ bool force_space);
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/*
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -366,7 +366,8 @@ bool ttm_resource_intersects(struct ttm_device *bdev,
const struct ttm_place *place,
size_t size);
bool ttm_resource_compatible(struct ttm_resource *res,
- struct ttm_placement *placement);
+ struct ttm_placement *placement,
+ bool busy);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo);
Previously we would never try to move a BO into the preferred placements
once it had landed in a busy placement, since those were considered
compatible.

Rework the whole handling and finally unify the idle and busy handling.
ttm_bo_validate() is now responsible for trying the idle placement first
and then using the busy placement if that didn't work.

The drawback is that we now always try the idle placement first for each
validation, which might cause some additional CPU overhead on overcommit.

v2: fix kerneldoc warning and coding style

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |   2 +-
 drivers/gpu/drm/ttm/ttm_bo.c               | 131 ++++++++-------------
 drivers/gpu/drm/ttm/ttm_resource.c         |  15 ++-
 include/drm/ttm/ttm_bo.h                   |   3 +-
 include/drm/ttm/ttm_resource.h             |   3 +-
 6 files changed, 65 insertions(+), 91 deletions(-)
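For illustration only (not part of the patch itself): a minimal, hypothetical driver-side sketch of how the reworked two-pass validation is meant to interact with the placement flags used above. The helper name example_bo_place() is made up, and the TTM_PL_FLAG_IDLE/TTM_PL_FLAG_BUSY flag names are the ones used by this version of the series:

#include <linux/kernel.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper; the caller must hold bo->base.resv. */
static int example_bo_place(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_place places[] = {
		/* Preferred: tried in both passes, evicting only in the second. */
		{ .mem_type = TTM_PL_VRAM },
		/* Fallback: skipped in the idle pass, used only when space is forced. */
		{ .mem_type = TTM_PL_TT, .flags = TTM_PL_FLAG_BUSY },
	};
	struct ttm_placement placement = {
		.num_placement = ARRAY_SIZE(places),
		.placement = places,
	};

	/*
	 * ttm_bo_validate() now runs up to two passes: first with
	 * force_space == false (skipping TTM_PL_FLAG_BUSY entries), then,
	 * only if that pass found no space, with force_space == true
	 * (skipping TTM_PL_FLAG_IDLE entries and evicting other BOs).
	 */
	return ttm_bo_validate(bo, &placement, &ctx);
}

With the previous code a BO that had once been placed through the busy path was reported as compatible forever and never moved back to the preferred placement; with this rework the idle pass above is retried on every validation.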