
[v2,10/10] drm/amdgpu: Move dmabuf attach/detach to backend_(un)bind

Message ID 20210422013058.6305-11-Felix.Kuehling@amd.com (mailing list archive)
State New, archived
Series Implement multi-GPU DMA mappings for KFD

Commit Message

Felix Kuehling April 22, 2021, 1:30 a.m. UTC
The dmabuf attachment should be updated by moving the SG BO to DOMAIN_CPU
and back to DOMAIN_GTT. This does not necessarily invoke the
populate/unpopulate callbacks. Do this in backend_bind/unbind instead.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  3 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       | 51 +++++++++----------
 2 files changed, 25 insertions(+), 29 deletions(-)
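
For context, a rough sketch of the KFD caller side in amdgpu_amdkfd_gpuvm.c, as introduced earlier in this series (approximate reconstruction for illustration, not part of this diff): DMA-mapping an SG BO validates it into AMDGPU_GEM_DOMAIN_GTT, and unmapping validates it back to AMDGPU_GEM_DOMAIN_CPU. Those moves always go through amdgpu_ttm_backend_bind()/amdgpu_ttm_backend_unbind(), but TTM may keep the ttm_tt populated across them, which is why the dma_buf_map_attachment()/dma_buf_unmap_attachment() calls belong in bind/unbind rather than in populate/unpopulate.

/* Sketch of the dmabuf map/unmap callers (approximate, for illustration). */
static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	/* Validating into GTT invokes backend_bind(), which now maps the
	 * dmabuf attachment and fills gtt->ttm.dma_address.
	 */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct amdgpu_bo *bo = attachment->bo_va->base.bo;

	/* Moving back to the CPU domain invokes backend_unbind(), which now
	 * unmaps the dmabuf attachment; unpopulate no longer has to run.
	 */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}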

Comments

Christian König April 22, 2021, 11:20 a.m. UTC | #1
Am 22.04.21 um 03:30 schrieb Felix Kuehling:
> The dmabuf attachment should be updated by moving the SG BO to DOMAIN_CPU
> and back to DOMAIN_GTT. This does not necessarily invoke the
> populate/unpopulate callbacks. Do this in backend_bind/unbind instead.
>
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

Ramesh Errabolu May 10, 2021, 10:09 p.m. UTC | #2

Acked-by: Ramesh Errabolu <ramesh.errabolu@amd.com>

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Thursday, April 22, 2021 6:20 AM
To: Kuehling, Felix <Felix.Kuehling@amd.com>; amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org
Subject: Re: [PATCH v2 10/10] drm/amdgpu: Move dmabuf attach/detach to backend_(un)bind

Am 22.04.21 um 03:30 schrieb Felix Kuehling:
> The dmabuf attachment should be updated by moving the SG BO to 
> DOMAIN_CPU and back to DOMAIN_GTT. This does not necessarily invoke 
> the populate/unpopulate callbacks. Do this in backend_bind/unbind instead.
>
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>


Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 18a1f9222a59..68e6ce8dcf33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -582,9 +582,6 @@  kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
 
 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	/* FIXME: This does not guarantee that amdgpu_ttm_tt_unpopulate is
-	 * called
-	 */
 }
 
 static void
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7e7d8330d64b..fc2a8d681dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -910,7 +910,23 @@  static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
 		}
+	} else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+		if (!ttm->sg) {
+			struct dma_buf_attachment *attach;
+			struct sg_table *sgt;
+
+			attach = gtt->gobj->import_attach;
+			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+			if (IS_ERR(sgt))
+				return PTR_ERR(sgt);
+
+			ttm->sg = sgt;
+		}
+
+		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+					       ttm->num_pages);
 	}
+
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -1037,8 +1053,15 @@  static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 	int r;
 
 	/* if the pages have userptr pinning then clear that first */
-	if (gtt->userptr)
+	if (gtt->userptr) {
 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+	} else if (ttm->sg && gtt->gobj->import_attach) {
+		struct dma_buf_attachment *attach;
+
+		attach = gtt->gobj->import_attach;
+		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+		ttm->sg = NULL;
+	}
 
 	if (!gtt->bound)
 		return;
@@ -1125,23 +1148,8 @@  static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
 		return 0;
 	}
 
-	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
-		if (!ttm->sg) {
-			struct dma_buf_attachment *attach;
-			struct sg_table *sgt;
-
-			attach = gtt->gobj->import_attach;
-			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-			if (IS_ERR(sgt))
-				return PTR_ERR(sgt);
-
-			ttm->sg = sgt;
-		}
-
-		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
-					       ttm->num_pages);
+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return 0;
-	}
 
 	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
 }
@@ -1165,15 +1173,6 @@  static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
 		return;
 	}
 
-	if (ttm->sg && gtt->gobj->import_attach) {
-		struct dma_buf_attachment *attach;
-
-		attach = gtt->gobj->import_attach;
-		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
-		ttm->sg = NULL;
-		return;
-	}
-
 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return;