@@ -708,8 +708,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
}

if (old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM)
- return ttm_bo_move_ttm(bo, ctx, new_mem);
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_move_old_to_system(bo, ctx);
+ if (r)
+ return r;
+ r = ttm_tt_set_placement_caching(bo->ttm, new_mem->placement);
+ if (r)
+ return r;
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }

if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
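The amdgpu hunk above (and the radeon hunk below, which open codes the identical sequence) replaces the ttm_bo_move_ttm() call with three TTM helpers chained by hand: move the old placement back to system memory, fix up the page caching for the system placement, and commit the new resource. A minimal sketch of that shared sequence as a local helper, built only from calls visible in this patch; the name ttm_bo_move_tt_to_system() is hypothetical:

static int ttm_bo_move_tt_to_system(struct ttm_buffer_object *bo,
				    struct ttm_operation_ctx *ctx,
				    struct ttm_resource *new_mem)
{
	int r;

	/* Tear down the old TT placement; pages go back to system. */
	r = ttm_bo_move_old_to_system(bo, ctx);
	if (r)
		return r;

	/* Match the ttm_tt caching state to the new placement flags. */
	r = ttm_tt_set_placement_caching(bo->ttm, new_mem->placement);
	if (r)
		return r;

	/* Point the buffer object at its new resource. */
	ttm_bo_assign_mem(bo, new_mem);
	return 0;
}

There is deliberately no ttm_bo_move_to_new_tt_mem() step here: a TTM_PL_SYSTEM destination needs no GTT bind, which is presumably why the drivers do not just inline the removed helper's full body (compare the ttm_bo_util.c hunk below).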
@@ -1107,8 +1107,14 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,

if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
- ret = ttm_bo_move_ttm(bo, ctx, new_reg);
- goto out;
+ ret = ttm_bo_move_old_to_system(bo, ctx);
+ if (ret)
+ goto out;
+ ret = ttm_tt_set_placement_caching(bo->ttm, new_reg->placement);
+ if (ret)
+ goto out;
+ ttm_bo_assign_mem(bo, new_reg);
+ return 0;
}

/* Hardware assisted copy. */
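nouveau open codes the same three steps but keeps its goto out convention for the failure paths; note that the successful move now returns 0 directly, where the old code jumped to out: even on success.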
@@ -353,8 +353,16 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
}

if (old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM)
- return ttm_bo_move_ttm(bo, ctx, new_mem);
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_move_old_to_system(bo, ctx);
+ if (r)
+ return r;
+ r = ttm_tt_set_placement_caching(bo->ttm, new_mem->placement);
+ if (r)
+ return r;
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }

if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
rdev->asic->copy.copy == NULL) {
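radeon's change is byte-for-byte the amdgpu sequence, so the helper sketched after the first hunk would cover both; only the surrounding driver logic differs (the copy-ring readiness test in the trailing context).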
@@ -89,25 +89,6 @@ int ttm_bo_move_old_to_system(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_old_to_system);

-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- int ret;
-
- ret = ttm_bo_move_old_to_system(bo, ctx);
- if (ret)
- return ret;
-
- ret = ttm_bo_move_to_new_tt_mem(bo, ctx, new_mem);
- if (ret)
- return ret;
-
- ttm_bo_assign_mem(bo, new_mem);
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem)
{
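The body of the deleted ttm_bo_move_ttm() spells out exactly what callers now compose by hand: ttm_bo_move_old_to_system(), then ttm_bo_move_to_new_tt_mem(), then ttm_bo_assign_mem(). As a condensed migration sketch for any remaining out-of-tree caller (the surrounding ret variable is assumed):

	/* Equivalent of the removed ttm_bo_move_ttm(bo, ctx, new_mem). */
	ret = ttm_bo_move_old_to_system(bo, ctx);
	if (!ret)
		ret = ttm_bo_move_to_new_tt_mem(bo, ctx, new_mem);
	if (!ret)
		ttm_bo_assign_mem(bo, new_mem);

The vmwgfx hunk below inlines this same sequence with early returns.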
@@ -742,7 +742,15 @@ static int vmw_move(struct ttm_buffer_object *bo,
ttm_bo_assign_mem(bo, new_mem);
return 0;
}
- ret = ttm_bo_move_ttm(bo, ctx, new_mem);
+ ret = ttm_bo_move_old_to_system(bo, ctx);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_move_to_new_tt_mem(bo, ctx, new_mem);
+ if (ret)
+ return ret;
+
+ ttm_bo_assign_mem(bo, new_mem);
} else
ret = ttm_bo_move_memcpy(bo, ctx, new_mem);

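Unlike the TT -> SYSTEM hunks above, vmw_move keeps the full ttm_bo_move_to_new_tt_mem() step, presumably because this branch can also move into a TT placement that still needs to be set up, not only to system. Note it does not return early on success either: it falls through with ret == 0, preserving the control flow of the old single ttm_bo_move_ttm() call.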
@@ -583,27 +583,6 @@ int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);

int ttm_bo_move_old_to_system(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
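The kerneldoc removed here was already stale: it documented @interruptible and @no_wait_gpu parameters although the prototype below it took a struct ttm_operation_ctx. If a follow-up wanted to document the surviving helper in the same style, a sketch (wording assumed, not part of this patch) might read:

/**
 * ttm_bo_move_old_to_system
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context for the move.
 *
 * Tear down @bo's old TT placement and return its backing pages to
 * system memory. Callers complete the move with
 * ttm_tt_set_placement_caching() or ttm_bo_move_to_new_tt_mem()
 * followed by ttm_bo_assign_mem(), as in the driver hunks above.
 * Returns:
 * !0: Failure.
 */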