diff mbox series

[v6] drm/i915: Individualize fences before adding to dma_resv obj

Message ID 20220525095955.15371-1-nirmoy.das@intel.com (mailing list archive)
State New, archived
Headers show
Series [v6] drm/i915: Individualize fences before adding to dma_resv obj | expand

Commit Message

Nirmoy Das May 25, 2022, 9:59 a.m. UTC
_i915_vma_move_to_active() can receive > 1 fences for
multiple batch buffers submission. Because dma_resv_add_fence()
can only accept one fence at a time, change _i915_vma_move_to_active()
to be aware of multiple fences so that it can add individual
fences to the dma resv object.

v6: fix multi-line comment.
v5: remove double fence reservation for batch VMAs.
v4: Reserve fences for composite_fence on multi-batch contexts and
    also reserve fence slots to composite_fence for each VMAs.
v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
v2: make sure to reserve enough fence slots before adding.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
 drivers/gpu/drm/i915/i915_vma.c               | 48 +++++++++++--------
 2 files changed, 30 insertions(+), 21 deletions(-)

Comments

Matthew Auld May 26, 2022, 9:27 a.m. UTC | #1
On 25/05/2022 10:59, Nirmoy Das wrote:
> _i915_vma_move_to_active() can receive > 1 fences for
> multiple batch buffers submission. Because dma_resv_add_fence()
> can only accept one fence at a time, change _i915_vma_move_to_active()
> to be aware of multiple fences so that it can add individual
> fences to the dma resv object.
> 
> v6: fix multi-line comment.
> v5: remove double fence reservation for batch VMAs.
> v4: Reserve fences for composite_fence on multi-batch contexts and
>      also reserve fence slots to composite_fence for each VMAs.
> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
> v2: make sure to reserve enough fence slots before adding.
> 
> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>

Do we need Fixes: ?

> ---
>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>   drivers/gpu/drm/i915/i915_vma.c               | 48 +++++++++++--------
>   2 files changed, 30 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index b279588c0672..8880d38d36b6 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
>   			}
>   		}
>   
> -		err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> +		/* Reserve enough slots to accommodate composite fences */
> +		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
>   		if (err)
>   			return err;
>   
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 4f6db539571a..0bffb70b3c5f 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -23,6 +23,7 @@
>    */
>   
>   #include <linux/sched/mm.h>
> +#include <linux/dma-fence-array.h>
>   #include <drm/drm_gem.h>
>   
>   #include "display/intel_frontbuffer.h"
> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>   	if (unlikely(err))
>   		return err;
>   
> +	/*
> +	 * Reserve fences slot early to prevent an allocation after preparing
> +	 * the workload and associating fences with dma_resv.
> +	 */
> +	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +		struct dma_fence *curr;
> +		int idx;
> +
> +		dma_fence_array_for_each(curr, idx, fence)
> +			;
> +		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
> +		if (unlikely(err))
> +			return err;
> +	}
> +
>   	if (flags & EXEC_OBJECT_WRITE) {
>   		struct intel_frontbuffer *front;
>   
> @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>   				i915_active_add_request(&front->write, rq);
>   			intel_frontbuffer_put(front);
>   		}
> +	}
>   
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> -		}
> +	if (fence) {
> +		struct dma_fence *curr;
> +		enum dma_resv_usage usage;
> +		int idx;
>   
> -		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_WRITE);
> +		obj->read_domains = 0;
> +		if (flags & EXEC_OBJECT_WRITE) {
> +			usage = DMA_RESV_USAGE_WRITE;
>   			obj->write_domain = I915_GEM_DOMAIN_RENDER;
> -			obj->read_domains = 0;
> -		}
> -	} else {
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> +		} else {
> +			usage = DMA_RESV_USAGE_READ;
>   		}
>   
> -		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_READ);
> -			obj->write_domain = 0;
> -		}
> +		dma_fence_array_for_each(curr, idx, fence)
> +			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>   	}
>   
>   	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Andrzej Hajda May 26, 2022, 10 a.m. UTC | #2
On 25.05.2022 11:59, Nirmoy Das wrote:
> _i915_vma_move_to_active() can receive > 1 fences for
> multiple batch buffers submission. Because dma_resv_add_fence()
> can only accept one fence at a time, change _i915_vma_move_to_active()
> to be aware of multiple fences so that it can add individual
> fences to the dma resv object.
> 
> v6: fix multi-line comment.
> v5: remove double fence reservation for batch VMAs.
> v4: Reserve fences for composite_fence on multi-batch contexts and
>      also reserve fence slots to composite_fence for each VMAs.
> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
> v2: make sure to reserve enough fence slots before adding.
> 
> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> ---
>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>   drivers/gpu/drm/i915/i915_vma.c               | 48 +++++++++++--------
>   2 files changed, 30 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index b279588c0672..8880d38d36b6 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
>   			}
>   		}
>   
> -		err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> +		/* Reserve enough slots to accommodate composite fences */
> +		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
>   		if (err)
>   			return err;
>   
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 4f6db539571a..0bffb70b3c5f 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -23,6 +23,7 @@
>    */
>   
>   #include <linux/sched/mm.h>
> +#include <linux/dma-fence-array.h>
>   #include <drm/drm_gem.h>
>   
>   #include "display/intel_frontbuffer.h"
> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>   	if (unlikely(err))
>   		return err;
>   
> +	/*
> +	 * Reserve fences slot early to prevent an allocation after preparing
> +	 * the workload and associating fences with dma_resv.
> +	 */
> +	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
> +		struct dma_fence *curr;
> +		int idx;
> +
> +		dma_fence_array_for_each(curr, idx, fence)
> +			;

This looks a little odd.
Maybe we could add a helper to dma core to get the fence count — but that's a different story.

Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>

Regards
Andrzej



> +		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
> +		if (unlikely(err))
> +			return err;
> +	}
> +
>   	if (flags & EXEC_OBJECT_WRITE) {
>   		struct intel_frontbuffer *front;
>   
> @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
>   				i915_active_add_request(&front->write, rq);
>   			intel_frontbuffer_put(front);
>   		}
> +	}
>   
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> -		}
> +	if (fence) {
> +		struct dma_fence *curr;
> +		enum dma_resv_usage usage;
> +		int idx;
>   
> -		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_WRITE);
> +		obj->read_domains = 0;
> +		if (flags & EXEC_OBJECT_WRITE) {
> +			usage = DMA_RESV_USAGE_WRITE;
>   			obj->write_domain = I915_GEM_DOMAIN_RENDER;
> -			obj->read_domains = 0;
> -		}
> -	} else {
> -		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
> -			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
> -			if (unlikely(err))
> -				return err;
> +		} else {
> +			usage = DMA_RESV_USAGE_READ;
>   		}
>   
> -		if (fence) {
> -			dma_resv_add_fence(vma->obj->base.resv, fence,
> -					   DMA_RESV_USAGE_READ);
> -			obj->write_domain = 0;
> -		}
> +		dma_fence_array_for_each(curr, idx, fence)
> +			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>   	}
>   
>   	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Nirmoy Das May 27, 2022, 9:46 a.m. UTC | #3
On 5/26/2022 11:27 AM, Matthew Auld wrote:
> On 25/05/2022 10:59, Nirmoy Das wrote:
>> _i915_vma_move_to_active() can receive > 1 fences for
>> multiple batch buffers submission. Because dma_resv_add_fence()
>> can only accept one fence at a time, change _i915_vma_move_to_active()
>> to be aware of multiple fences so that it can add individual
>> fences to the dma resv object.
>>
>> v6: fix multi-line comment.
>> v5: remove double fence reservation for batch VMAs.
>> v4: Reserve fences for composite_fence on multi-batch contexts and
>>      also reserve fence slots to composite_fence for each VMAs.
>> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
>> v2: make sure to reserve enough fence slots before adding.
>>
>> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>
> Do we need Fixes: ?


We can add:

Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")


Regards,

Nirmoy


>
>> ---
>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>>   drivers/gpu/drm/i915/i915_vma.c               | 48 +++++++++++--------
>>   2 files changed, 30 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
>> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> index b279588c0672..8880d38d36b6 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct 
>> i915_execbuffer *eb)
>>               }
>>           }
>>   -        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>> +        /* Reserve enough slots to accommodate composite fences */
>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, 
>> eb->num_batches);
>>           if (err)
>>               return err;
>>   diff --git a/drivers/gpu/drm/i915/i915_vma.c 
>> b/drivers/gpu/drm/i915/i915_vma.c
>> index 4f6db539571a..0bffb70b3c5f 100644
>> --- a/drivers/gpu/drm/i915/i915_vma.c
>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>> @@ -23,6 +23,7 @@
>>    */
>>     #include <linux/sched/mm.h>
>> +#include <linux/dma-fence-array.h>
>>   #include <drm/drm_gem.h>
>>     #include "display/intel_frontbuffer.h"
>> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma 
>> *vma,
>>       if (unlikely(err))
>>           return err;
>>   +    /*
>> +     * Reserve fences slot early to prevent an allocation after 
>> preparing
>> +     * the workload and associating fences with dma_resv.
>> +     */
>> +    if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
>> +        struct dma_fence *curr;
>> +        int idx;
>> +
>> +        dma_fence_array_for_each(curr, idx, fence)
>> +            ;
>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
>> +        if (unlikely(err))
>> +            return err;
>> +    }
>> +
>>       if (flags & EXEC_OBJECT_WRITE) {
>>           struct intel_frontbuffer *front;
>>   @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct 
>> i915_vma *vma,
>>                   i915_active_add_request(&front->write, rq);
>>               intel_frontbuffer_put(front);
>>           }
>> +    }
>>   -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>> -            if (unlikely(err))
>> -                return err;
>> -        }
>> +    if (fence) {
>> +        struct dma_fence *curr;
>> +        enum dma_resv_usage usage;
>> +        int idx;
>>   -        if (fence) {
>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>> -                       DMA_RESV_USAGE_WRITE);
>> +        obj->read_domains = 0;
>> +        if (flags & EXEC_OBJECT_WRITE) {
>> +            usage = DMA_RESV_USAGE_WRITE;
>>               obj->write_domain = I915_GEM_DOMAIN_RENDER;
>> -            obj->read_domains = 0;
>> -        }
>> -    } else {
>> -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>> -            if (unlikely(err))
>> -                return err;
>> +        } else {
>> +            usage = DMA_RESV_USAGE_READ;
>>           }
>>   -        if (fence) {
>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>> -                       DMA_RESV_USAGE_READ);
>> -            obj->write_domain = 0;
>> -        }
>> +        dma_fence_array_for_each(curr, idx, fence)
>> +            dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>>       }
>>         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Matthew Auld May 27, 2022, 9:55 a.m. UTC | #4
On 27/05/2022 10:46, Das, Nirmoy wrote:
> 
> On 5/26/2022 11:27 AM, Matthew Auld wrote:
>> On 25/05/2022 10:59, Nirmoy Das wrote:
>>> _i915_vma_move_to_active() can receive > 1 fences for
>>> multiple batch buffers submission. Because dma_resv_add_fence()
>>> can only accept one fence at a time, change _i915_vma_move_to_active()
>>> to be aware of multiple fences so that it can add individual
>>> fences to the dma resv object.
>>>
>>> v6: fix multi-line comment.
>>> v5: remove double fence reservation for batch VMAs.
>>> v4: Reserve fences for composite_fence on multi-batch contexts and
>>>      also reserve fence slots to composite_fence for each VMAs.
>>> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
>>> v2: make sure to reserve enough fence slots before adding.
>>>
>>> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
>>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
>> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>>
>> Do we need Fixes: ?
> 
> 
> We can add:
> 
> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")

Ok, so needs:

Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
Cc: <stable@vger.kernel.org> # v5.16+

Should I go ahead and push this to gt-next?

> 
> 
> Regards,
> 
> Nirmoy
> 
> 
>>
>>> ---
>>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>>>   drivers/gpu/drm/i915/i915_vma.c               | 48 +++++++++++--------
>>>   2 files changed, 30 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
>>> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>> index b279588c0672..8880d38d36b6 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct 
>>> i915_execbuffer *eb)
>>>               }
>>>           }
>>>   -        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>> +        /* Reserve enough slots to accommodate composite fences */
>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, 
>>> eb->num_batches);
>>>           if (err)
>>>               return err;
>>>   diff --git a/drivers/gpu/drm/i915/i915_vma.c 
>>> b/drivers/gpu/drm/i915/i915_vma.c
>>> index 4f6db539571a..0bffb70b3c5f 100644
>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>> @@ -23,6 +23,7 @@
>>>    */
>>>     #include <linux/sched/mm.h>
>>> +#include <linux/dma-fence-array.h>
>>>   #include <drm/drm_gem.h>
>>>     #include "display/intel_frontbuffer.h"
>>> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma 
>>> *vma,
>>>       if (unlikely(err))
>>>           return err;
>>>   +    /*
>>> +     * Reserve fences slot early to prevent an allocation after 
>>> preparing
>>> +     * the workload and associating fences with dma_resv.
>>> +     */
>>> +    if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>> +        struct dma_fence *curr;
>>> +        int idx;
>>> +
>>> +        dma_fence_array_for_each(curr, idx, fence)
>>> +            ;
>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
>>> +        if (unlikely(err))
>>> +            return err;
>>> +    }
>>> +
>>>       if (flags & EXEC_OBJECT_WRITE) {
>>>           struct intel_frontbuffer *front;
>>>   @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct 
>>> i915_vma *vma,
>>>                   i915_active_add_request(&front->write, rq);
>>>               intel_frontbuffer_put(front);
>>>           }
>>> +    }
>>>   -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>> -            if (unlikely(err))
>>> -                return err;
>>> -        }
>>> +    if (fence) {
>>> +        struct dma_fence *curr;
>>> +        enum dma_resv_usage usage;
>>> +        int idx;
>>>   -        if (fence) {
>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>> -                       DMA_RESV_USAGE_WRITE);
>>> +        obj->read_domains = 0;
>>> +        if (flags & EXEC_OBJECT_WRITE) {
>>> +            usage = DMA_RESV_USAGE_WRITE;
>>>               obj->write_domain = I915_GEM_DOMAIN_RENDER;
>>> -            obj->read_domains = 0;
>>> -        }
>>> -    } else {
>>> -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>> -            if (unlikely(err))
>>> -                return err;
>>> +        } else {
>>> +            usage = DMA_RESV_USAGE_READ;
>>>           }
>>>   -        if (fence) {
>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>> -                       DMA_RESV_USAGE_READ);
>>> -            obj->write_domain = 0;
>>> -        }
>>> +        dma_fence_array_for_each(curr, idx, fence)
>>> +            dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>>>       }
>>>         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Nirmoy Das May 27, 2022, 10:24 a.m. UTC | #5
On 5/27/2022 11:55 AM, Matthew Auld wrote:
> On 27/05/2022 10:46, Das, Nirmoy wrote:
>>
>> On 5/26/2022 11:27 AM, Matthew Auld wrote:
>>> On 25/05/2022 10:59, Nirmoy Das wrote:
>>>> _i915_vma_move_to_active() can receive > 1 fences for
>>>> multiple batch buffers submission. Because dma_resv_add_fence()
>>>> can only accept one fence at a time, change _i915_vma_move_to_active()
>>>> to be aware of multiple fences so that it can add individual
>>>> fences to the dma resv object.
>>>>
>>>> v6: fix multi-line comment.
>>>> v5: remove double fence reservation for batch VMAs.
>>>> v4: Reserve fences for composite_fence on multi-batch contexts and
>>>>      also reserve fence slots to composite_fence for each VMAs.
>>>> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
>>>> v2: make sure to reserve enough fence slots before adding.
>>>>
>>>> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
>>>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
>>> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>>>
>>> Do we need Fixes: ?
>>
>>
>> We can add:
>>
>> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
>
> Ok, so needs:
>
> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
> Cc: <stable@vger.kernel.org> # v5.16+
>
> Should I go ahead and push this to gt-next?


The patch will not get applied cleanly on 5.16 and 5.17. How do we 
handle that generally?


Thanks,

Nirmoy

>
>>
>>
>> Regards,
>>
>> Nirmoy
>>
>>
>>>
>>>> ---
>>>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>>>>   drivers/gpu/drm/i915/i915_vma.c               | 48 
>>>> +++++++++++--------
>>>>   2 files changed, 30 insertions(+), 21 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
>>>> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>> index b279588c0672..8880d38d36b6 100644
>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct 
>>>> i915_execbuffer *eb)
>>>>               }
>>>>           }
>>>>   -        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>> +        /* Reserve enough slots to accommodate composite fences */
>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, 
>>>> eb->num_batches);
>>>>           if (err)
>>>>               return err;
>>>>   diff --git a/drivers/gpu/drm/i915/i915_vma.c 
>>>> b/drivers/gpu/drm/i915/i915_vma.c
>>>> index 4f6db539571a..0bffb70b3c5f 100644
>>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>>> @@ -23,6 +23,7 @@
>>>>    */
>>>>     #include <linux/sched/mm.h>
>>>> +#include <linux/dma-fence-array.h>
>>>>   #include <drm/drm_gem.h>
>>>>     #include "display/intel_frontbuffer.h"
>>>> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma 
>>>> *vma,
>>>>       if (unlikely(err))
>>>>           return err;
>>>>   +    /*
>>>> +     * Reserve fences slot early to prevent an allocation after 
>>>> preparing
>>>> +     * the workload and associating fences with dma_resv.
>>>> +     */
>>>> +    if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>> +        struct dma_fence *curr;
>>>> +        int idx;
>>>> +
>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>> +            ;
>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
>>>> +        if (unlikely(err))
>>>> +            return err;
>>>> +    }
>>>> +
>>>>       if (flags & EXEC_OBJECT_WRITE) {
>>>>           struct intel_frontbuffer *front;
>>>>   @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct 
>>>> i915_vma *vma,
>>>> i915_active_add_request(&front->write, rq);
>>>>               intel_frontbuffer_put(front);
>>>>           }
>>>> +    }
>>>>   -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>> -            if (unlikely(err))
>>>> -                return err;
>>>> -        }
>>>> +    if (fence) {
>>>> +        struct dma_fence *curr;
>>>> +        enum dma_resv_usage usage;
>>>> +        int idx;
>>>>   -        if (fence) {
>>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>>> -                       DMA_RESV_USAGE_WRITE);
>>>> +        obj->read_domains = 0;
>>>> +        if (flags & EXEC_OBJECT_WRITE) {
>>>> +            usage = DMA_RESV_USAGE_WRITE;
>>>>               obj->write_domain = I915_GEM_DOMAIN_RENDER;
>>>> -            obj->read_domains = 0;
>>>> -        }
>>>> -    } else {
>>>> -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>> -            if (unlikely(err))
>>>> -                return err;
>>>> +        } else {
>>>> +            usage = DMA_RESV_USAGE_READ;
>>>>           }
>>>>   -        if (fence) {
>>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>>> -                       DMA_RESV_USAGE_READ);
>>>> -            obj->write_domain = 0;
>>>> -        }
>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>> +            dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>>>>       }
>>>>         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Matthew Auld May 27, 2022, 11:37 a.m. UTC | #6
On 27/05/2022 11:24, Das, Nirmoy wrote:
> 
> On 5/27/2022 11:55 AM, Matthew Auld wrote:
>> On 27/05/2022 10:46, Das, Nirmoy wrote:
>>>
>>> On 5/26/2022 11:27 AM, Matthew Auld wrote:
>>>> On 25/05/2022 10:59, Nirmoy Das wrote:
>>>>> _i915_vma_move_to_active() can receive > 1 fences for
>>>>> multiple batch buffers submission. Because dma_resv_add_fence()
>>>>> can only accept one fence at a time, change _i915_vma_move_to_active()
>>>>> to be aware of multiple fences so that it can add individual
>>>>> fences to the dma resv object.
>>>>>
>>>>> v6: fix multi-line comment.
>>>>> v5: remove double fence reservation for batch VMAs.
>>>>> v4: Reserve fences for composite_fence on multi-batch contexts and
>>>>>      also reserve fence slots to composite_fence for each VMAs.
>>>>> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
>>>>> v2: make sure to reserve enough fence slots before adding.
>>>>>
>>>>> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
>>>>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
>>>> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>>>>
>>>> Do we need Fixes: ?
>>>
>>>
>>> We can add:
>>>
>>> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
>>
>> Ok, so needs:
>>
>> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
>> Cc: <stable@vger.kernel.org> # v5.16+
>>
>> Should I go ahead and push this to gt-next?
> 
> 
> The patch will not get applied cleanly on 5.16 and 5.17. How do we 
> handle that generally ?

The above is just the output of 'dim fixes <sha>'.

I'm not really sure tbh, but I think the author just gets notified that 
their patch doesn't apply to a certain stable version, at which point I 
guess it's up to them to provide a version that does, if they deem it 
necessary. Also note that there is no CI coverage for such things...

> 
> 
> Thanks,
> 
> Nirmoy
> 
>>
>>>
>>>
>>> Regards,
>>>
>>> Nirmoy
>>>
>>>
>>>>
>>>>> ---
>>>>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>>>>>   drivers/gpu/drm/i915/i915_vma.c               | 48 
>>>>> +++++++++++--------
>>>>>   2 files changed, 30 insertions(+), 21 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
>>>>> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>> index b279588c0672..8880d38d36b6 100644
>>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct 
>>>>> i915_execbuffer *eb)
>>>>>               }
>>>>>           }
>>>>>   -        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>> +        /* Reserve enough slots to accommodate composite fences */
>>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, 
>>>>> eb->num_batches);
>>>>>           if (err)
>>>>>               return err;
>>>>>   diff --git a/drivers/gpu/drm/i915/i915_vma.c 
>>>>> b/drivers/gpu/drm/i915/i915_vma.c
>>>>> index 4f6db539571a..0bffb70b3c5f 100644
>>>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>>>> @@ -23,6 +23,7 @@
>>>>>    */
>>>>>     #include <linux/sched/mm.h>
>>>>> +#include <linux/dma-fence-array.h>
>>>>>   #include <drm/drm_gem.h>
>>>>>     #include "display/intel_frontbuffer.h"
>>>>> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma 
>>>>> *vma,
>>>>>       if (unlikely(err))
>>>>>           return err;
>>>>>   +    /*
>>>>> +     * Reserve fences slot early to prevent an allocation after 
>>>>> preparing
>>>>> +     * the workload and associating fences with dma_resv.
>>>>> +     */
>>>>> +    if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>> +        struct dma_fence *curr;
>>>>> +        int idx;
>>>>> +
>>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>>> +            ;
>>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
>>>>> +        if (unlikely(err))
>>>>> +            return err;
>>>>> +    }
>>>>> +
>>>>>       if (flags & EXEC_OBJECT_WRITE) {
>>>>>           struct intel_frontbuffer *front;
>>>>>   @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct 
>>>>> i915_vma *vma,
>>>>> i915_active_add_request(&front->write, rq);
>>>>>               intel_frontbuffer_put(front);
>>>>>           }
>>>>> +    }
>>>>>   -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>> -            if (unlikely(err))
>>>>> -                return err;
>>>>> -        }
>>>>> +    if (fence) {
>>>>> +        struct dma_fence *curr;
>>>>> +        enum dma_resv_usage usage;
>>>>> +        int idx;
>>>>>   -        if (fence) {
>>>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>>>> -                       DMA_RESV_USAGE_WRITE);
>>>>> +        obj->read_domains = 0;
>>>>> +        if (flags & EXEC_OBJECT_WRITE) {
>>>>> +            usage = DMA_RESV_USAGE_WRITE;
>>>>>               obj->write_domain = I915_GEM_DOMAIN_RENDER;
>>>>> -            obj->read_domains = 0;
>>>>> -        }
>>>>> -    } else {
>>>>> -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>> -            if (unlikely(err))
>>>>> -                return err;
>>>>> +        } else {
>>>>> +            usage = DMA_RESV_USAGE_READ;
>>>>>           }
>>>>>   -        if (fence) {
>>>>> -            dma_resv_add_fence(vma->obj->base.resv, fence,
>>>>> -                       DMA_RESV_USAGE_READ);
>>>>> -            obj->write_domain = 0;
>>>>> -        }
>>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>>> +            dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>>>>>       }
>>>>>         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
Nirmoy Das May 27, 2022, 11:44 a.m. UTC | #7
On 5/27/2022 1:37 PM, Matthew Auld wrote:
> On 27/05/2022 11:24, Das, Nirmoy wrote:
>>
>> On 5/27/2022 11:55 AM, Matthew Auld wrote:
>>> On 27/05/2022 10:46, Das, Nirmoy wrote:
>>>>
>>>> On 5/26/2022 11:27 AM, Matthew Auld wrote:
>>>>> On 25/05/2022 10:59, Nirmoy Das wrote:
>>>>>> _i915_vma_move_to_active() can receive > 1 fences for
>>>>>> multiple batch buffers submission. Because dma_resv_add_fence()
>>>>>> can only accept one fence at a time, change 
>>>>>> _i915_vma_move_to_active()
>>>>>> to be aware of multiple fences so that it can add individual
>>>>>> fences to the dma resv object.
>>>>>>
>>>>>> v6: fix multi-line comment.
>>>>>> v5: remove double fence reservation for batch VMAs.
>>>>>> v4: Reserve fences for composite_fence on multi-batch contexts and
>>>>>>      also reserve fence slots to composite_fence for each VMAs.
>>>>>> v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
>>>>>> v2: make sure to reserve enough fence slots before adding.
>>>>>>
>>>>>> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614
>>>>>> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
>>>>> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
>>>>>
>>>>> Do we need Fixes: ?
>>>>
>>>>
>>>> We can add:
>>>>
>>>> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
>>>
>>> Ok, so needs:
>>>
>>> Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf")
>>> Cc: <stable@vger.kernel.org> # v5.16+
>>>
>>> Should I go ahead and push this to gt-next?
>>
>>
>> The patch will not get applied cleanly on 5.16 and 5.17. How do we 
>> handle that generally ?
>
> The above is just the output of 'dim fixes <sha>'.
>
> I'm not really sure tbh, but I think the author just gets notified 
> that their patch doesn't apply to a certain stable version, at which 
> point I guess it's up to them to provide a version that does, if they 
> deem it necessary. Also note that there is no CI coverage for such 
> things...


OK, please push with the Fixes tag; it might be useful in the future.


Thanks,

Nirmoy

>
>>
>>
>> Thanks,
>>
>> Nirmoy
>>
>>>
>>>>
>>>>
>>>> Regards,
>>>>
>>>> Nirmoy
>>>>
>>>>
>>>>>
>>>>>> ---
>>>>>>   .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 +-
>>>>>>   drivers/gpu/drm/i915/i915_vma.c               | 48 
>>>>>> +++++++++++--------
>>>>>>   2 files changed, 30 insertions(+), 21 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
>>>>>> b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>>> index b279588c0672..8880d38d36b6 100644
>>>>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
>>>>>> @@ -1010,7 +1010,8 @@ static int eb_validate_vmas(struct 
>>>>>> i915_execbuffer *eb)
>>>>>>               }
>>>>>>           }
>>>>>>   -        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>>> +        /* Reserve enough slots to accommodate composite fences */
>>>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, 
>>>>>> eb->num_batches);
>>>>>>           if (err)
>>>>>>               return err;
>>>>>>   diff --git a/drivers/gpu/drm/i915/i915_vma.c 
>>>>>> b/drivers/gpu/drm/i915/i915_vma.c
>>>>>> index 4f6db539571a..0bffb70b3c5f 100644
>>>>>> --- a/drivers/gpu/drm/i915/i915_vma.c
>>>>>> +++ b/drivers/gpu/drm/i915/i915_vma.c
>>>>>> @@ -23,6 +23,7 @@
>>>>>>    */
>>>>>>     #include <linux/sched/mm.h>
>>>>>> +#include <linux/dma-fence-array.h>
>>>>>>   #include <drm/drm_gem.h>
>>>>>>     #include "display/intel_frontbuffer.h"
>>>>>> @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct 
>>>>>> i915_vma *vma,
>>>>>>       if (unlikely(err))
>>>>>>           return err;
>>>>>>   +    /*
>>>>>> +     * Reserve fences slot early to prevent an allocation after 
>>>>>> preparing
>>>>>> +     * the workload and associating fences with dma_resv.
>>>>>> +     */
>>>>>> +    if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>>> +        struct dma_fence *curr;
>>>>>> +        int idx;
>>>>>> +
>>>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>>>> +            ;
>>>>>> +        err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
>>>>>> +        if (unlikely(err))
>>>>>> +            return err;
>>>>>> +    }
>>>>>> +
>>>>>>       if (flags & EXEC_OBJECT_WRITE) {
>>>>>>           struct intel_frontbuffer *front;
>>>>>>   @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct 
>>>>>> i915_vma *vma,
>>>>>> i915_active_add_request(&front->write, rq);
>>>>>>               intel_frontbuffer_put(front);
>>>>>>           }
>>>>>> +    }
>>>>>>   -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>>> -            if (unlikely(err))
>>>>>> -                return err;
>>>>>> -        }
>>>>>> +    if (fence) {
>>>>>> +        struct dma_fence *curr;
>>>>>> +        enum dma_resv_usage usage;
>>>>>> +        int idx;
>>>>>>   -        if (fence) {
>>>>>> - dma_resv_add_fence(vma->obj->base.resv, fence,
>>>>>> -                       DMA_RESV_USAGE_WRITE);
>>>>>> +        obj->read_domains = 0;
>>>>>> +        if (flags & EXEC_OBJECT_WRITE) {
>>>>>> +            usage = DMA_RESV_USAGE_WRITE;
>>>>>>               obj->write_domain = I915_GEM_DOMAIN_RENDER;
>>>>>> -            obj->read_domains = 0;
>>>>>> -        }
>>>>>> -    } else {
>>>>>> -        if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
>>>>>> -            err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
>>>>>> -            if (unlikely(err))
>>>>>> -                return err;
>>>>>> +        } else {
>>>>>> +            usage = DMA_RESV_USAGE_READ;
>>>>>>           }
>>>>>>   -        if (fence) {
>>>>>> - dma_resv_add_fence(vma->obj->base.resv, fence,
>>>>>> -                       DMA_RESV_USAGE_READ);
>>>>>> -            obj->write_domain = 0;
>>>>>> -        }
>>>>>> +        dma_fence_array_for_each(curr, idx, fence)
>>>>>> + dma_resv_add_fence(vma->obj->base.resv, curr, usage);
>>>>>>       }
>>>>>>         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b279588c0672..8880d38d36b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1010,7 +1010,8 @@  static int eb_validate_vmas(struct i915_execbuffer *eb)
 			}
 		}
 
-		err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+		/* Reserve enough slots to accommodate composite fences */
+		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
 		if (err)
 			return err;
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4f6db539571a..0bffb70b3c5f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -23,6 +23,7 @@ 
  */
 
 #include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drm_gem.h>
 
 #include "display/intel_frontbuffer.h"
@@ -1823,6 +1824,21 @@  int _i915_vma_move_to_active(struct i915_vma *vma,
 	if (unlikely(err))
 		return err;
 
+	/*
+	 * Reserve fences slot early to prevent an allocation after preparing
+	 * the workload and associating fences with dma_resv.
+	 */
+	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+		struct dma_fence *curr;
+		int idx;
+
+		dma_fence_array_for_each(curr, idx, fence)
+			;
+		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+		if (unlikely(err))
+			return err;
+	}
+
 	if (flags & EXEC_OBJECT_WRITE) {
 		struct intel_frontbuffer *front;
 
@@ -1832,31 +1848,23 @@  int _i915_vma_move_to_active(struct i915_vma *vma,
 				i915_active_add_request(&front->write, rq);
 			intel_frontbuffer_put(front);
 		}
+	}
 
-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
-		}
+	if (fence) {
+		struct dma_fence *curr;
+		enum dma_resv_usage usage;
+		int idx;
 
-		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_WRITE);
+		obj->read_domains = 0;
+		if (flags & EXEC_OBJECT_WRITE) {
+			usage = DMA_RESV_USAGE_WRITE;
 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
-			obj->read_domains = 0;
-		}
-	} else {
-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
+		} else {
+			usage = DMA_RESV_USAGE_READ;
 		}
 
-		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_READ);
-			obj->write_domain = 0;
-		}
+		dma_fence_array_for_each(curr, idx, fence)
+			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
 	}
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)