Message ID | 20200923030454.362731-4-airlied@gmail.com (mailing list archive)
---|---
State | New, archived
Series | ttm driver cleanups and invert move
On 23.09.20 at 05:04, Dave Airlie wrote:
> From: Dave Airlie <airlied@redhat.com>
>
> Just pass it around the move paths, and remove unused pieces
>
> Signed-off-by: Dave Airlie <airlied@redhat.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>  drivers/gpu/drm/radeon/radeon_ttm.c | 34 +++++++++++++++-------------------
>  1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 085f58e833d8..9ff8c81d7784 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -151,7 +151,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, bool no_wait_gpu,
+			bool evict,
 			struct ttm_resource *new_mem,
 			struct ttm_resource *old_mem)
 {
@@ -206,11 +206,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+				bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_resource *new_mem)
 {
-	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	struct ttm_resource *old_mem = &bo->mem;
 	struct ttm_resource tmp_mem;
 	struct ttm_place placements;
@@ -227,7 +226,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placements.lpfn = 0;
 	placements.mem_type = TTM_PL_TT;
 	placements.flags = TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -237,7 +236,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 		goto out_cleanup;
 	}
 
-	r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -246,22 +245,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
+	r = ttm_bo_move_ttm(bo, ctx, new_mem);
 out_cleanup:
 	ttm_resource_free(bo, &tmp_mem);
 	return r;
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible,
-				bool no_wait_gpu,
+				bool evict,
+				struct ttm_operation_ctx *ctx,
 				struct ttm_resource *new_mem)
 {
-	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	struct ttm_resource *old_mem = &bo->mem;
 	struct ttm_resource tmp_mem;
 	struct ttm_placement placement;
@@ -278,15 +276,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placements.lpfn = 0;
 	placements.mem_type = TTM_PL_TT;
 	placements.flags = TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -334,14 +332,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
-		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
-					 ctx->no_wait_gpu, new_mem);
+		r = radeon_move_vram_ram(bo, evict, ctx, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
-		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
-					 ctx->no_wait_gpu, new_mem);
+		r = radeon_move_ram_vram(bo, evict, ctx, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+		r = radeon_move_blit(bo, evict,
 				     new_mem, old_mem);
 	}
 
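For readers outside the kernel tree, here is a minimal standalone sketch of the pattern the patch applies: instead of unpacking the operation context into separate bools at each call site and rebuilding it inside the helpers, the caller's context pointer is threaded through unchanged. The struct operation_ctx and the move_helper_* names below are illustrative stand-ins, not the real TTM API; the two fields mirror the { interruptible, no_wait_gpu } initializer that the patch removes.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct ttm_operation_ctx (not the real TTM type). */
struct operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
};

/* Old shape: the caller unpacks the context and the helper rebuilds it. */
static int move_helper_old(bool interruptible, bool no_wait_gpu)
{
	struct operation_ctx ctx = { interruptible, no_wait_gpu };

	printf("old: interruptible=%d no_wait_gpu=%d\n",
	       ctx.interruptible, ctx.no_wait_gpu);
	return 0;
}

/* New shape: the caller's context is passed through as-is. */
static int move_helper_new(struct operation_ctx *ctx)
{
	printf("new: interruptible=%d no_wait_gpu=%d\n",
	       ctx->interruptible, ctx->no_wait_gpu);
	return 0;
}

int main(void)
{
	struct operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };

	move_helper_old(ctx.interruptible, ctx.no_wait_gpu);
	move_helper_new(&ctx);
	return 0;
}

Built as an ordinary user-space program this prints both variants; in the driver the same reshaping is what lets ttm_bo_mem_space(), ttm_tt_populate() and ttm_bo_move_ttm() receive the caller's ctx directly, as the hunks above show.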