
[2/4] drm/gem: Delete gem array fencing helpers

Message ID 20220331204651.2699107-3-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived
Series [1/4] drm/etnaviv: Use scheduler dependency handling

Commit Message

Daniel Vetter March 31, 2022, 8:46 p.m. UTC
Integrated into the scheduler now and all users converted over.

v2: Rebased over changes from König.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <mripard@kernel.org>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
 drivers/gpu/drm/drm_gem.c | 80 ---------------------------------------
 include/drm/drm_gem.h     |  5 ---
 2 files changed, 85 deletions(-)
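
For drivers that were using these helpers, dependencies are now attached directly to the scheduler job instead of being collected in a driver-owned xarray. A minimal sketch of the replacement pattern, assuming the drm_sched_job_add_implicit_dependencies() helper from the scheduler dependency-handling work (the foo_* names are hypothetical):

#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

/*
 * Sketch of a driver submit path after the conversion: implicit fences
 * are pulled out of each BO's reservation object and stored as
 * scheduler dependencies, instead of being accumulated in an xarray.
 */
static int foo_job_add_deps(struct drm_sched_job *job,
			    struct drm_gem_object **bos,
			    unsigned int nr_bos, bool write)
{
	unsigned int i;
	int ret;

	/* Reservations should already be held, e.g. via
	 * drm_gem_lock_reservations(), as with the deleted helpers. */
	for (i = 0; i < nr_bos; i++) {
		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
							      write);
		if (ret)
			return ret;
	}

	return 0;
}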

Comments

Daniel Vetter April 4, 2022, 1:15 p.m. UTC | #1
On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
> Integrated into the scheduler now and all users converted over.
> 
> v2: Rebased over changes from König.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <mripard@kernel.org>
> Cc: Thomas Zimmermann <tzimmermann@suse.de>
> Cc: David Airlie <airlied@linux.ie>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org

Anyone up for an ack for this one here?

Thanks, Daniel

Christian König April 4, 2022, 1:30 p.m. UTC | #2
On 04.04.22 at 15:15, Daniel Vetter wrote:
> On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
>> Integrated into the scheduler now and all users converted over.
>>
>> v2: Rebased over changes from König.
>>
>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
>> Cc: Maxime Ripard <mripard@kernel.org>
>> Cc: Thomas Zimmermann <tzimmermann@suse.de>
>> Cc: David Airlie <airlied@linux.ie>
>> Cc: Daniel Vetter <daniel@ffwll.ch>
>> Cc: Sumit Semwal <sumit.semwal@linaro.org>
>> Cc: "Christian König" <christian.koenig@amd.com>
>> Cc: linux-media@vger.kernel.org
>> Cc: linaro-mm-sig@lists.linaro.org
> Anyone up for an ack for this one here?

Acked-by: Christian König <christian.koenig@amd.com>

Please land that ASAP so that I can rebase on top.

Thanks,
Christian.

Daniel Vetter April 4, 2022, 3:35 p.m. UTC | #3
On Mon, Apr 04, 2022 at 03:30:59PM +0200, Christian König wrote:
> On 04.04.22 at 15:15, Daniel Vetter wrote:
> > On Thu, Mar 31, 2022 at 10:46:49PM +0200, Daniel Vetter wrote:
> > > Integrated into the scheduler now and all users converted over.
> > > 
> > > v2: Rebased over changes from König.
> > > 
> > > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > > Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> > > Cc: Maxime Ripard <mripard@kernel.org>
> > > Cc: Thomas Zimmermann <tzimmermann@suse.de>
> > > Cc: David Airlie <airlied@linux.ie>
> > > Cc: Daniel Vetter <daniel@ffwll.ch>
> > > Cc: Sumit Semwal <sumit.semwal@linaro.org>
> > > Cc: "Christian König" <christian.koenig@amd.com>
> > > Cc: linux-media@vger.kernel.org
> > > Cc: linaro-mm-sig@lists.linaro.org
> > Anyone up for an ack for this one here?
> 
> Acked-by: Christian König <christian.koenig@amd.com>
> 
> Please land that ASAP so that I can rebase on top.

First 3 patches pushed, I'll drop the fourth.
-Daniel


Patch

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 56fb87885146..133dfae06fab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1273,83 +1273,3 @@  drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
 	ww_acquire_fini(acquire_ctx);
 }
 EXPORT_SYMBOL(drm_gem_unlock_reservations);
-
-/**
- * drm_gem_fence_array_add - Adds the fence to an array of fences to be
- * waited on, deduplicating fences from the same context.
- *
- * @fence_array: array of dma_fence * for the job to block on.
- * @fence: the dma_fence to add to the list of dependencies.
- *
- * This functions consumes the reference for @fence both on success and error
- * cases.
- *
- * Returns:
- * 0 on success, or an error on failing to expand the array.
- */
-int drm_gem_fence_array_add(struct xarray *fence_array,
-			    struct dma_fence *fence)
-{
-	struct dma_fence *entry;
-	unsigned long index;
-	u32 id = 0;
-	int ret;
-
-	if (!fence)
-		return 0;
-
-	/* Deduplicate if we already depend on a fence from the same context.
-	 * This lets the size of the array of deps scale with the number of
-	 * engines involved, rather than the number of BOs.
-	 */
-	xa_for_each(fence_array, index, entry) {
-		if (entry->context != fence->context)
-			continue;
-
-		if (dma_fence_is_later(fence, entry)) {
-			dma_fence_put(entry);
-			xa_store(fence_array, index, fence, GFP_KERNEL);
-		} else {
-			dma_fence_put(fence);
-		}
-		return 0;
-	}
-
-	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
-	if (ret != 0)
-		dma_fence_put(fence);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_fence_array_add);
-
-/**
- * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
- * in the GEM object's reservation object to an array of dma_fences for use in
- * scheduling a rendering job.
- *
- * This should be called after drm_gem_lock_reservations() on your array of
- * GEM objects used in the job but before updating the reservations with your
- * own fences.
- *
- * @fence_array: array of dma_fence * for the job to block on.
- * @obj: the gem object to add new dependencies from.
- * @write: whether the job might write the object (so we need to depend on
- * shared fences in the reservation object).
- */
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
-				     struct drm_gem_object *obj,
-				     bool write)
-{
-	struct dma_resv_iter cursor;
-	struct dma_fence *fence;
-	int ret = 0;
-
-	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
-		ret = drm_gem_fence_array_add(fence_array, fence);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index e2941cee14b6..9d7c61a122dc 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -407,11 +407,6 @@  int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
 			      struct ww_acquire_ctx *acquire_ctx);
 void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
 				 struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
-			    struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
-				     struct drm_gem_object *obj,
-				     bool write);
 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 			    u32 handle, u64 *offset);
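
For explicit fences (e.g. an in-fence from a sync_file or syncobj) that drivers previously fed into drm_gem_fence_array_add(), the equivalent after this series is to hand them to the scheduler job directly. A rough sketch, assuming the drm_sched_job_add_dependency() helper and a hypothetical foo_* wrapper:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/*
 * Sketch: hand an explicit in-fence straight to the scheduler job.
 * Like the deleted helper, the scheduler is expected to consume the
 * fence reference and to deduplicate fences from the same context, so
 * the dependency list scales with the number of engines involved
 * rather than the number of BOs.
 */
static int foo_job_add_in_fence(struct drm_sched_job *job,
				struct dma_fence *in_fence)
{
	if (!in_fence)
		return 0;

	return drm_sched_job_add_dependency(job, in_fence);
}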