diff mbox series

[v3,8/9] mm/mremap: Allow arch runtime override

Message ID 20210330060752.592769-9-aneesh.kumar@linux.ibm.com (mailing list archive)
State New, archived
Headers show
Series Speedup mremap on ppc64 | expand

Commit Message

Aneesh Kumar K.V March 30, 2021, 6:07 a.m. UTC
Architectures like ppc64 can support faster mremap only with radix
translation. Hence allow a runtime check w.r.t. support for fast mremap.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/arc/include/asm/tlb.h     |  5 +++++
 arch/arm64/include/asm/tlb.h   |  6 ++++++
 arch/powerpc/include/asm/tlb.h |  6 ++++++
 arch/x86/include/asm/tlb.h     |  5 +++++
 mm/mremap.c                    | 14 +++++++++++++-
 5 files changed, 35 insertions(+), 1 deletion(-)

Comments

Christophe Leroy April 9, 2021, 9:35 a.m. UTC | #1
Le 30/03/2021 à 08:07, Aneesh Kumar K.V a écrit :
> Architectures like ppc64 can only support faster mremap only with radix

... only .... only ...

> translation. Hence allow a runtime check w.r.t support for fast mremap.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>   arch/arc/include/asm/tlb.h     |  5 +++++
>   arch/arm64/include/asm/tlb.h   |  6 ++++++
>   arch/powerpc/include/asm/tlb.h |  6 ++++++
>   arch/x86/include/asm/tlb.h     |  5 +++++
>   mm/mremap.c                    | 14 +++++++++++++-
>   5 files changed, 35 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
> index 975b35d3738d..22b8cfb46cbf 100644
> --- a/arch/arc/include/asm/tlb.h
> +++ b/arch/arc/include/asm/tlb.h
> @@ -9,4 +9,9 @@
>   #include <linux/pagemap.h>
>   #include <asm-generic/tlb.h>
>   
> +#define arch_supports_page_tables_move arch_supports_page_tables_move
> +static inline bool arch_supports_page_tables_move(void)
> +{
> +	return true;
> +}

I can't see why ARC arch needs that. It neither selects CONFIG_HAVE_MOVE_PMD nor CONFIG_HAVE_MOVE_PUD.


>   #endif /* _ASM_ARC_TLB_H */
> diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
> index 61c97d3b58c7..fe209efc6a10 100644
> --- a/arch/arm64/include/asm/tlb.h
> +++ b/arch/arm64/include/asm/tlb.h
> @@ -94,4 +94,10 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
>   }
>   #endif
>   
> +#define arch_supports_page_tables_move arch_supports_page_tables_move
> +static inline bool arch_supports_page_tables_move(void)
> +{
> +	return true;
> +}
> +
>   #endif
> diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
> index 160422a439aa..058918a7cd3c 100644
> --- a/arch/powerpc/include/asm/tlb.h
> +++ b/arch/powerpc/include/asm/tlb.h
> @@ -83,5 +83,11 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
>   }
>   #endif
>   
> +#define arch_supports_page_tables_move arch_supports_page_tables_move
> +static inline bool arch_supports_page_tables_move(void)
> +{
> +	return radix_enabled();
> +}
> +
>   #endif /* __KERNEL__ */
>   #endif /* __ASM_POWERPC_TLB_H */
> diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
> index 1bfe979bb9bc..62915238bb36 100644
> --- a/arch/x86/include/asm/tlb.h
> +++ b/arch/x86/include/asm/tlb.h
> @@ -37,4 +37,9 @@ static inline void __tlb_remove_table(void *table)
>   	free_page_and_swap_cache(table);
>   }
>   
> +#define arch_supports_page_tables_move arch_supports_page_tables_move
> +static inline bool arch_supports_page_tables_move(void)
> +{
> +	return true;
> +}
>   #endif /* _ASM_X86_TLB_H */
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 7ac1df8e6d51..4d812af3e44b 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -25,7 +25,7 @@
>   #include <linux/userfaultfd_k.h>
>   
>   #include <asm/cacheflush.h>
> -#include <asm/tlbflush.h>
> +#include <asm/tlb.h>
>   #include <asm/pgalloc.h>
>   
>   #include "internal.h"
> @@ -221,6 +221,14 @@ static inline void flush_pte_tlb_pwc_range(struct vm_area_struct *vma,
>   }
>   #endif
>   
> +#ifndef arch_supports_page_tables_move
> +#define arch_supports_page_tables_move arch_supports_page_tables_move
> +static inline bool arch_supports_page_tables_move(void)
> +{
> +	return false;

Can you do

	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) || IS_ENABLED(CONFIG_HAVE_MOVE_PUD);

And then remove the arch_supports_page_tables_move() you have added for arc, arm64 and x86 ?

> +}
> +#endif
> +
>   #ifdef CONFIG_HAVE_MOVE_PMD

Next step could be remove that #ifdef and the content of the matching #else
For that we'd just need a stub version of set_pmd_at() and set_pud_at().

>   static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
>   		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
> @@ -229,6 +237,8 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
>   	struct mm_struct *mm = vma->vm_mm;
>   	pmd_t pmd;
>   
> +	if (!arch_supports_page_tables_move())
> +		return false;
>   	/*
>   	 * The destination pmd shouldn't be established, free_pgtables()
>   	 * should have released it.
> @@ -295,6 +305,8 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
>   	struct mm_struct *mm = vma->vm_mm;
>   	pud_t pud;
>   
> +	if (!arch_supports_page_tables_move())
> +		return false;
>   	/*
>   	 * The destination pud shouldn't be established, free_pgtables()
>   	 * should have released it.
>
Aneesh Kumar K.V April 9, 2021, 11:59 a.m. UTC | #2
On 4/9/21 3:05 PM, Christophe Leroy wrote:
> 
> 
> Le 30/03/2021 à 08:07, Aneesh Kumar K.V a écrit :
>> Architectures like ppc64 can only support faster mremap only with radix
> 
> ... only .... only ...
> 
>> translation. Hence allow a runtime check w.r.t support for fast mremap.
>>

will fix that

>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>>   arch/arc/include/asm/tlb.h     |  5 +++++
>>   arch/arm64/include/asm/tlb.h   |  6 ++++++
>>   arch/powerpc/include/asm/tlb.h |  6 ++++++
>>   arch/x86/include/asm/tlb.h     |  5 +++++
>>   mm/mremap.c                    | 14 +++++++++++++-
>>   5 files changed, 35 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
>> index 975b35d3738d..22b8cfb46cbf 100644
>> --- a/arch/arc/include/asm/tlb.h
>> +++ b/arch/arc/include/asm/tlb.h
>> @@ -9,4 +9,9 @@
>>   #include <linux/pagemap.h>
>>   #include <asm-generic/tlb.h>
>> +#define arch_supports_page_tables_move arch_supports_page_tables_move
>> +static inline bool arch_supports_page_tables_move(void)
>> +{
>> +    return true;
>> +}
> 
> I can't see why ARC arch needs that. It neither selects 
> CONFIG_HAVE_MOVE_PMD nor CONFIG_HAVE_MOVE_PUD.
> 
>

ok, will fix that (I confused arch/Kconfig with arc/Kconfig)



>>   #endif /* _ASM_ARC_TLB_H */
>> diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
>> index 61c97d3b58c7..fe209efc6a10 100644
>> --- a/arch/arm64/include/asm/tlb.h
>> +++ b/arch/arm64/include/asm/tlb.h
>> @@ -94,4 +94,10 @@ static inline void __pud_free_tlb(struct mmu_gather 
>> *tlb, pud_t *pudp,
>>   }
>>   #endif
>> +#define arch_supports_page_tables_move arch_supports_page_tables_move
>> +static inline bool arch_supports_page_tables_move(void)
>> +{
>> +    return true;
>> +}
>> +
>>   #endif
>> diff --git a/arch/powerpc/include/asm/tlb.h 
>> b/arch/powerpc/include/asm/tlb.h
>> index 160422a439aa..058918a7cd3c 100644
>> --- a/arch/powerpc/include/asm/tlb.h
>> +++ b/arch/powerpc/include/asm/tlb.h
>> @@ -83,5 +83,11 @@ static inline int mm_is_thread_local(struct 
>> mm_struct *mm)
>>   }
>>   #endif
>> +#define arch_supports_page_tables_move arch_supports_page_tables_move
>> +static inline bool arch_supports_page_tables_move(void)
>> +{
>> +    return radix_enabled();
>> +}
>> +
>>   #endif /* __KERNEL__ */
>>   #endif /* __ASM_POWERPC_TLB_H */
>> diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
>> index 1bfe979bb9bc..62915238bb36 100644
>> --- a/arch/x86/include/asm/tlb.h
>> +++ b/arch/x86/include/asm/tlb.h
>> @@ -37,4 +37,9 @@ static inline void __tlb_remove_table(void *table)
>>       free_page_and_swap_cache(table);
>>   }
>> +#define arch_supports_page_tables_move arch_supports_page_tables_move
>> +static inline bool arch_supports_page_tables_move(void)
>> +{
>> +    return true;
>> +}
>>   #endif /* _ASM_X86_TLB_H */
>> diff --git a/mm/mremap.c b/mm/mremap.c
>> index 7ac1df8e6d51..4d812af3e44b 100644
>> --- a/mm/mremap.c
>> +++ b/mm/mremap.c
>> @@ -25,7 +25,7 @@
>>   #include <linux/userfaultfd_k.h>
>>   #include <asm/cacheflush.h>
>> -#include <asm/tlbflush.h>
>> +#include <asm/tlb.h>
>>   #include <asm/pgalloc.h>
>>   #include "internal.h"
>> @@ -221,6 +221,14 @@ static inline void flush_pte_tlb_pwc_range(struct 
>> vm_area_struct *vma,
>>   }
>>   #endif
>> +#ifndef arch_supports_page_tables_move
>> +#define arch_supports_page_tables_move arch_supports_page_tables_move
>> +static inline bool arch_supports_page_tables_move(void)
>> +{
>> +    return false;
> 
> Can you do
> 
>      return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) || 
> IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
> 
> And then remove the arch_supports_page_tables_move() you have added for 
> arc, arm64 and x86 ?
> 


something like below?

#ifndef arch_supports_page_tables_move
#define arch_supports_page_tables_move arch_supports_page_tables_move
static inline bool arch_supports_page_tables_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

and remove those from those arch headers.





>> +}
>> +#endif
>> +
>>   #ifdef CONFIG_HAVE_MOVE_PMD
> 
> Next step could be remove that #ifdef and the content of the matching #else
> For that we'd just need a stub version of set_pmd_at() and set_pud_at().
> 
>>   static bool move_normal_pmd(struct vm_area_struct *vma, unsigned 
>> long old_addr,
>>             unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
>> @@ -229,6 +237,8 @@ static bool move_normal_pmd(struct vm_area_struct 
>> *vma, unsigned long old_addr,
>>       struct mm_struct *mm = vma->vm_mm;
>>       pmd_t pmd;
>> +    if (!arch_supports_page_tables_move())
>> +        return false;
>>       /*
>>        * The destination pmd shouldn't be established, free_pgtables()
>>        * should have released it.
>> @@ -295,6 +305,8 @@ static bool move_normal_pud(struct vm_area_struct 
>> *vma, unsigned long old_addr,
>>       struct mm_struct *mm = vma->vm_mm;
>>       pud_t pud;
>> +    if (!arch_supports_page_tables_move())
>> +        return false;
>>       /*
>>        * The destination pud shouldn't be established, free_pgtables()
>>        * should have released it.
>>
diff mbox series

Patch

diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index 975b35d3738d..22b8cfb46cbf 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,4 +9,9 @@ 
 #include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
+#define arch_supports_page_tables_move arch_supports_page_tables_move
+static inline bool arch_supports_page_tables_move(void)
+{
+	return true;
+}
 #endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 61c97d3b58c7..fe209efc6a10 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -94,4 +94,10 @@  static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 }
 #endif
 
+#define arch_supports_page_tables_move arch_supports_page_tables_move
+static inline bool arch_supports_page_tables_move(void)
+{
+	return true;
+}
+
 #endif
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 160422a439aa..058918a7cd3c 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -83,5 +83,11 @@  static inline int mm_is_thread_local(struct mm_struct *mm)
 }
 #endif
 
+#define arch_supports_page_tables_move arch_supports_page_tables_move
+static inline bool arch_supports_page_tables_move(void)
+{
+	return radix_enabled();
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 1bfe979bb9bc..62915238bb36 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -37,4 +37,9 @@  static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+#define arch_supports_page_tables_move arch_supports_page_tables_move
+static inline bool arch_supports_page_tables_move(void)
+{
+	return true;
+}
 #endif /* _ASM_X86_TLB_H */
diff --git a/mm/mremap.c b/mm/mremap.c
index 7ac1df8e6d51..4d812af3e44b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,7 +25,7 @@ 
 #include <linux/userfaultfd_k.h>
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
 #include <asm/pgalloc.h>
 
 #include "internal.h"
@@ -221,6 +221,14 @@  static inline void flush_pte_tlb_pwc_range(struct vm_area_struct *vma,
 }
 #endif
 
+#ifndef arch_supports_page_tables_move
+#define arch_supports_page_tables_move arch_supports_page_tables_move
+static inline bool arch_supports_page_tables_move(void)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
@@ -229,6 +237,8 @@  static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t pmd;
 
+	if (!arch_supports_page_tables_move())
+		return false;
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
 	 * should have released it.
@@ -295,6 +305,8 @@  static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
 	struct mm_struct *mm = vma->vm_mm;
 	pud_t pud;
 
+	if (!arch_supports_page_tables_move())
+		return false;
 	/*
 	 * The destination pud shouldn't be established, free_pgtables()
 	 * should have released it.