
[v5,18/25] arm64/mm: Split __flush_tlb_range() to elide trailing DSB

Message ID 20240202080756.1453939-19-ryan.roberts@arm.com (mailing list archive)
State New
Series Transparent Contiguous PTEs for User Mappings

Commit Message

Ryan Roberts Feb. 2, 2024, 8:07 a.m. UTC
Split __flush_tlb_range() into __flush_tlb_range_nosync() +
__flush_tlb_range(), in the same way as the existing flush_tlb_page()
arrangement. This allows calling __flush_tlb_range_nosync() to elide the
trailing DSB. Forthcoming "contpte" code will take advantage of this
when clearing the young bit from a contiguous range of ptes.

Tested-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/include/asm/tlbflush.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
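
For illustration, a minimal sketch of the sort of "contpte" caller the commit
message anticipates. The helper name, the ptep_test_and_clear_young() loop and
the flush parameters are assumptions for this sketch, not code from the series;
the point is only that the trailing DSB can be elided, since waiting for the
invalidation to complete buys nothing when a racing access would merely set
the young bit again:

	static inline void sketch_clear_young_contig(struct vm_area_struct *vma,
						     unsigned long addr, pte_t *ptep)
	{
		int i;

		/* Clear the young bit from each pte in the contiguous block. */
		for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
			ptep_test_and_clear_young(vma, addr, ptep);

		/* Issue the invalidation but do not wait for it to complete. */
		__flush_tlb_range_nosync(vma, addr - CONT_PTE_SIZE, addr,
					 PAGE_SIZE, true, 3);
	}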

Comments

David Hildenbrand Feb. 12, 2024, 12:44 p.m. UTC | #1
On 02.02.24 09:07, Ryan Roberts wrote:
> Split __flush_tlb_range() into __flush_tlb_range_nosync() +
> __flush_tlb_range(), in the same way as the existing flush_tlb_page()
> arrangement. This allows calling __flush_tlb_range_nosync() to elide the
> trailing DSB. Forthcoming "contpte" code will take advantage of this
> when clearing the young bit from a contiguous range of ptes.
> 
> Tested-by: John Hubbard <jhubbard@nvidia.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>   arch/arm64/include/asm/tlbflush.h | 13 +++++++++++--
>   1 file changed, 11 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
> index 79e932a1bdf8..50a765917327 100644
> --- a/arch/arm64/include/asm/tlbflush.h
> +++ b/arch/arm64/include/asm/tlbflush.h
> @@ -422,7 +422,7 @@ do {									\
>   #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
>   	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
>   
> -static inline void __flush_tlb_range(struct vm_area_struct *vma,
> +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>   				     unsigned long start, unsigned long end,
>   				     unsigned long stride, bool last_level,
>   				     int tlb_level)
> @@ -456,10 +456,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
>   		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
>   				     tlb_level, true, lpa2_is_enabled());
>   
> -	dsb(ish);
>   	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
>   }
>   
> +static inline void __flush_tlb_range(struct vm_area_struct *vma,
> +				     unsigned long start, unsigned long end,
> +				     unsigned long stride, bool last_level,
> +				     int tlb_level)
> +{
> +	__flush_tlb_range_nosync(vma, start, end, stride,
> +				 last_level, tlb_level);
> +	dsb(ish);
> +}
> +
>   static inline void flush_tlb_range(struct vm_area_struct *vma,
>   				   unsigned long start, unsigned long end)
>   {

You're now calling dsb() after 
mmu_notifier_arch_invalidate_secondary_tlbs().


In flush_tlb_mm(), we have the order

	dsb(ish);	
	mmu_notifier_arch_invalidate_secondary_tlbs()

In flush_tlb_page(), we have the effective order:

	mmu_notifier_arch_invalidate_secondary_tlbs()
	dsb(ish);

In flush_tlb_range(), we used to have the order:

	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs();


So I *suspect* having that DSB after
mmu_notifier_arch_invalidate_secondary_tlbs() is fine. Hopefully,
nothing in there relies on the old placement.

Maybe worth spelling this out in the patch description.

Reviewed-by: David Hildenbrand <david@redhat.com>
Ryan Roberts Feb. 12, 2024, 1:05 p.m. UTC | #2
On 12/02/2024 12:44, David Hildenbrand wrote:
> On 02.02.24 09:07, Ryan Roberts wrote:
>> Split __flush_tlb_range() into __flush_tlb_range_nosync() +
>> __flush_tlb_range(), in the same way as the existing flush_tlb_page()
>> arrangement. This allows calling __flush_tlb_range_nosync() to elide the
>> trailing DSB. Forthcoming "contpte" code will take advantage of this
>> when clearing the young bit from a contiguous range of ptes.
>>
>> Tested-by: John Hubbard <jhubbard@nvidia.com>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>   arch/arm64/include/asm/tlbflush.h | 13 +++++++++++--
>>   1 file changed, 11 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/tlbflush.h
>> b/arch/arm64/include/asm/tlbflush.h
>> index 79e932a1bdf8..50a765917327 100644
>> --- a/arch/arm64/include/asm/tlbflush.h
>> +++ b/arch/arm64/include/asm/tlbflush.h
>> @@ -422,7 +422,7 @@ do {                                    \
>>   #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
>>       __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false,
>> kvm_lpa2_is_enabled());
>>   -static inline void __flush_tlb_range(struct vm_area_struct *vma,
>> +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>>                        unsigned long start, unsigned long end,
>>                        unsigned long stride, bool last_level,
>>                        int tlb_level)
>> @@ -456,10 +456,19 @@ static inline void __flush_tlb_range(struct
>> vm_area_struct *vma,
>>           __flush_tlb_range_op(vae1is, start, pages, stride, asid,
>>                        tlb_level, true, lpa2_is_enabled());
>>   -    dsb(ish);
>>       mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
>>   }
>>   +static inline void __flush_tlb_range(struct vm_area_struct *vma,
>> +                     unsigned long start, unsigned long end,
>> +                     unsigned long stride, bool last_level,
>> +                     int tlb_level)
>> +{
>> +    __flush_tlb_range_nosync(vma, start, end, stride,
>> +                 last_level, tlb_level);
>> +    dsb(ish);
>> +}
>> +
>>   static inline void flush_tlb_range(struct vm_area_struct *vma,
>>                      unsigned long start, unsigned long end)
>>   {
> 
> You're now calling dsb() after mmu_notifier_arch_invalidate_secondary_tlbs().
> 
> 
> In flush_tlb_mm(), we have the order
> 
>     dsb(ish);   
>     mmu_notifier_arch_invalidate_secondary_tlbs()
> 
> In flush_tlb_page(), we have the effective order:
> 
>     mmu_notifier_arch_invalidate_secondary_tlbs()
>     dsb(ish);
> 
> In flush_tlb_range(), we used to have the order:
> 
>     dsb(ish);
>     mmu_notifier_arch_invalidate_secondary_tlbs();
> 
> 
> So I *suspect* having that DSB after
> mmu_notifier_arch_invalidate_secondary_tlbs() is fine. Hopefully, nothing in
> there relies on the old placement.

Will spotted this against v3. My argument was that I was following the existing
pattern in flush_tlb_page(). Apparently that pattern is not correct and needs
changing, but the conclusion was to leave my change as is for now, since it is
consistent, and to change them all together at a later date.

https://lore.kernel.org/linux-arm-kernel/123a58b0-2ea6-4da3-9719-98ca55c8095e@arm.com/



> 
> Maybe worth spelling this out in the patch description.
> 
> Reviewed-by: David Hildenbrand <david@redhat.com>
> 

Thanks!
David Hildenbrand Feb. 12, 2024, 1:15 p.m. UTC | #3
On 12.02.24 14:05, Ryan Roberts wrote:
> On 12/02/2024 12:44, David Hildenbrand wrote:
>> On 02.02.24 09:07, Ryan Roberts wrote:
>>> Split __flush_tlb_range() into __flush_tlb_range_nosync() +
>>> __flush_tlb_range(), in the same way as the existing flush_tlb_page()
>>> arrangement. This allows calling __flush_tlb_range_nosync() to elide the
>>> trailing DSB. Forthcoming "contpte" code will take advantage of this
>>> when clearing the young bit from a contiguous range of ptes.
>>>
>>> Tested-by: John Hubbard <jhubbard@nvidia.com>
>>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>>> ---
>>>    arch/arm64/include/asm/tlbflush.h | 13 +++++++++++--
>>>    1 file changed, 11 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/arch/arm64/include/asm/tlbflush.h
>>> b/arch/arm64/include/asm/tlbflush.h
>>> index 79e932a1bdf8..50a765917327 100644
>>> --- a/arch/arm64/include/asm/tlbflush.h
>>> +++ b/arch/arm64/include/asm/tlbflush.h
>>> @@ -422,7 +422,7 @@ do {                                    \
>>>    #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
>>>        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false,
>>> kvm_lpa2_is_enabled());
>>>    -static inline void __flush_tlb_range(struct vm_area_struct *vma,
>>> +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>>>                         unsigned long start, unsigned long end,
>>>                         unsigned long stride, bool last_level,
>>>                         int tlb_level)
>>> @@ -456,10 +456,19 @@ static inline void __flush_tlb_range(struct
>>> vm_area_struct *vma,
>>>            __flush_tlb_range_op(vae1is, start, pages, stride, asid,
>>>                         tlb_level, true, lpa2_is_enabled());
>>>    -    dsb(ish);
>>>        mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
>>>    }
>>>    +static inline void __flush_tlb_range(struct vm_area_struct *vma,
>>> +                     unsigned long start, unsigned long end,
>>> +                     unsigned long stride, bool last_level,
>>> +                     int tlb_level)
>>> +{
>>> +    __flush_tlb_range_nosync(vma, start, end, stride,
>>> +                 last_level, tlb_level);
>>> +    dsb(ish);
>>> +}
>>> +
>>>    static inline void flush_tlb_range(struct vm_area_struct *vma,
>>>                       unsigned long start, unsigned long end)
>>>    {
>>
>> You're now calling dsb() after mmu_notifier_arch_invalidate_secondary_tlbs().
>>
>>
>> In flush_tlb_mm(), we have the order
>>
>>      dsb(ish);
>>      mmu_notifier_arch_invalidate_secondary_tlbs()
>>
>> In flush_tlb_page(), we have the effective order:
>>
>>      mmu_notifier_arch_invalidate_secondary_tlbs()
>>      dsb(ish);
>>
>> In flush_tlb_range(), we used to have the order:
>>
>>      dsb(ish);
>>      mmu_notifier_arch_invalidate_secondary_tlbs();
>>
>>
>> So I *suspect* having that DSB after
>> mmu_notifier_arch_invalidate_secondary_tlbs() is fine. Hopefully, nothing in
>> there relies on the old placement.
> 
> Will spotted this against v3. My argument was that I was following the existing
> pattern in flush_tlb_page(). Apparently that pattern is not correct and needs
> changing, but the conclusion was to leave my change as is for now, since it is
> consistent, and to change them all together at a later date.

Good, I think you should add a few words to the patch description
("ordering might be incorrect, but is in line with __flush_tlb_page()";
will be resolved separately).
Ryan Roberts Feb. 12, 2024, 1:27 p.m. UTC | #4
On 12/02/2024 13:15, David Hildenbrand wrote:
> On 12.02.24 14:05, Ryan Roberts wrote:
>> On 12/02/2024 12:44, David Hildenbrand wrote:
>>> On 02.02.24 09:07, Ryan Roberts wrote:
>>>> Split __flush_tlb_range() into __flush_tlb_range_nosync() +
>>>> __flush_tlb_range(), in the same way as the existing flush_tlb_page()
>>>> arrangement. This allows calling __flush_tlb_range_nosync() to elide the
>>>> trailing DSB. Forthcoming "contpte" code will take advantage of this
>>>> when clearing the young bit from a contiguous range of ptes.
>>>>
>>>> Tested-by: John Hubbard <jhubbard@nvidia.com>
>>>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>>>> ---
>>>>    arch/arm64/include/asm/tlbflush.h | 13 +++++++++++--
>>>>    1 file changed, 11 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/arch/arm64/include/asm/tlbflush.h
>>>> b/arch/arm64/include/asm/tlbflush.h
>>>> index 79e932a1bdf8..50a765917327 100644
>>>> --- a/arch/arm64/include/asm/tlbflush.h
>>>> +++ b/arch/arm64/include/asm/tlbflush.h
>>>> @@ -422,7 +422,7 @@ do {                                    \
>>>>    #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
>>>>        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false,
>>>> kvm_lpa2_is_enabled());
>>>>    -static inline void __flush_tlb_range(struct vm_area_struct *vma,
>>>> +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
>>>>                         unsigned long start, unsigned long end,
>>>>                         unsigned long stride, bool last_level,
>>>>                         int tlb_level)
>>>> @@ -456,10 +456,19 @@ static inline void __flush_tlb_range(struct
>>>> vm_area_struct *vma,
>>>>            __flush_tlb_range_op(vae1is, start, pages, stride, asid,
>>>>                         tlb_level, true, lpa2_is_enabled());
>>>>    -    dsb(ish);
>>>>        mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
>>>>    }
>>>>    +static inline void __flush_tlb_range(struct vm_area_struct *vma,
>>>> +                     unsigned long start, unsigned long end,
>>>> +                     unsigned long stride, bool last_level,
>>>> +                     int tlb_level)
>>>> +{
>>>> +    __flush_tlb_range_nosync(vma, start, end, stride,
>>>> +                 last_level, tlb_level);
>>>> +    dsb(ish);
>>>> +}
>>>> +
>>>>    static inline void flush_tlb_range(struct vm_area_struct *vma,
>>>>                       unsigned long start, unsigned long end)
>>>>    {
>>>
>>> You're now calling dsb() after mmu_notifier_arch_invalidate_secondary_tlbs().
>>>
>>>
>>> In flush_tlb_mm(), we have the order
>>>
>>>      dsb(ish);
>>>      mmu_notifier_arch_invalidate_secondary_tlbs()
>>>
>>> In flush_tlb_page(), we have the effective order:
>>>
>>>      mmu_notifier_arch_invalidate_secondary_tlbs()
>>>      dsb(ish);
>>>
>>> In flush_tlb_range(), we used to have the order:
>>>
>>>      dsb(ish);
>>>      mmu_notifier_arch_invalidate_secondary_tlbs();
>>>
>>>
>>> So I *suspect* having that DSB after
>>> mmu_notifier_arch_invalidate_secondary_tlbs() is fine. Hopefully, nothing in
>>> there relies on the old placement.
>>
>> Will spotted this against v3. My argument was that I was following the existing
>> pattern in flush_tlb_page(). Apparently that pattern is not correct and needs
>> changing, but the conclusion was to leave my change as is for now, since it is
>> consistent, and to change them all together at a later date.
> 
> Good, I think you should add a few words to the patch description ("ordering
> might be incorrect, but is in line with __flush_tlb_page()"; will be resolved
> separately).
> 

ACK, will do. Thanks!

Patch

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 79e932a1bdf8..50a765917327 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -422,7 +422,7 @@  do {									\
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
 	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
+static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
 				     int tlb_level)
@@ -456,10 +456,19 @@  static inline void __flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
 				     tlb_level, true, lpa2_is_enabled());
 
-	dsb(ish);
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
 }
 
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end,
+				     unsigned long stride, bool last_level,
+				     int tlb_level)
+{
+	__flush_tlb_range_nosync(vma, start, end, stride,
+				 last_level, tlb_level);
+	dsb(ish);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
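
A usage note on the resulting pair of helpers: callers that must observe
completion of the invalidation keep using __flush_tlb_range(), which still
issues the dsb(ish); callers that can tolerate a pending invalidation, or that
batch several ranges, can call the nosync variant and place a single barrier
themselves. A hedged sketch (the function name and ranges are invented for
illustration):

	static inline void sketch_flush_two_ranges(struct vm_area_struct *vma,
						   unsigned long a_start, unsigned long a_end,
						   unsigned long b_start, unsigned long b_end)
	{
		__flush_tlb_range_nosync(vma, a_start, a_end, PAGE_SIZE, true, 3);
		__flush_tlb_range_nosync(vma, b_start, b_end, PAGE_SIZE, true, 3);
		dsb(ish);	/* one barrier completes both invalidations */
	}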