Message ID | 20220407103251.1209606-4-anshuman.khandual@arm.com
---|---
State | New
Series | mm/mmap: Drop arch_vm_get_page_prot() and arch_filter_pgprot()
On Thu, Apr 07, 2022 at 04:02:47PM +0530, Anshuman Khandual wrote:
> diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
> index 77ada00280d9..307534fcec00 100644
> --- a/arch/arm64/mm/mmap.c
> +++ b/arch/arm64/mm/mmap.c
> @@ -55,3 +55,36 @@ static int __init adjust_protection_map(void)
>  	return 0;
>  }
>  arch_initcall(adjust_protection_map);
> +
> +static pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
> +{
> +	pteval_t prot = 0;
> +
> +	if (vm_flags & VM_ARM64_BTI)
> +		prot |= PTE_GP;
> +
> +	/*
> +	 * There are two conditions required for returning a Normal Tagged
> +	 * memory type: (1) the user requested it via PROT_MTE passed to
> +	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
> +	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
> +	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
> +	 * mmap() call since mprotect() does not accept MAP_* flags.
> +	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
> +	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
> +	 */
> +	if (vm_flags & VM_MTE)
> +		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
> +
> +	return __pgprot(prot);
> +}
> +
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> +{
> +	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
> +			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
> +			pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL(vm_get_page_prot);

Could you write all this in a single function? I think I mentioned it in
a previous series (untested):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);

With that:

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
On 4/8/22 15:58, Catalin Marinas wrote:
> On Thu, Apr 07, 2022 at 04:02:47PM +0530, Anshuman Khandual wrote:
>> diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
>> index 77ada00280d9..307534fcec00 100644
>> --- a/arch/arm64/mm/mmap.c
>> +++ b/arch/arm64/mm/mmap.c
>> @@ -55,3 +55,36 @@ static int __init adjust_protection_map(void)
>>  	return 0;
>>  }
>>  arch_initcall(adjust_protection_map);
>> +
>> +static pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
>> +{
>> +	pteval_t prot = 0;
>> +
>> +	if (vm_flags & VM_ARM64_BTI)
>> +		prot |= PTE_GP;
>> +
>> +	/*
>> +	 * There are two conditions required for returning a Normal Tagged
>> +	 * memory type: (1) the user requested it via PROT_MTE passed to
>> +	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
>> +	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
>> +	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
>> +	 * mmap() call since mprotect() does not accept MAP_* flags.
>> +	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
>> +	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
>> +	 */
>> +	if (vm_flags & VM_MTE)
>> +		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
>> +
>> +	return __pgprot(prot);
>> +}
>> +
>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>> +{
>> +	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
>> +			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
>> +			pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
>> +
>> +	return ret;
>> +}
>> +EXPORT_SYMBOL(vm_get_page_prot);
>
> Could you write all this in a single function? I think I mentioned it in
> a previous series (untested):

Right, missed that.

>
> pgprot_t vm_get_page_prot(unsigned long vm_flags)
> {
> 	pteval_t prot = pgprot_val(protection_map[vm_flags &
> 			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
>
> 	if (vm_flags & VM_ARM64_BTI)
> 		prot |= PTE_GP;
>
> 	/*
> 	 * There are two conditions required for returning a Normal Tagged
> 	 * memory type: (1) the user requested it via PROT_MTE passed to
> 	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
> 	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
> 	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
> 	 * mmap() call since mprotect() does not accept MAP_* flags.
> 	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
> 	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
> 	 */
> 	if (vm_flags & VM_MTE)
> 		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
>
> 	return __pgprot(prot);
> }
> EXPORT_SYMBOL(vm_get_page_prot);
>
> With that:

Sure, will change them into a single function.

>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
>
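For context, the comment discussed above describes how PROT_MTE passed to
mmap() or mprotect() ends up as VM_MTE in vma->vm_flags. A minimal,
hypothetical userspace sketch of requesting such a mapping on arm64 follows;
the PROT_BTI and PROT_MTE values are the arm64 UAPI numbers and are defined
locally only in case the toolchain headers predate BTI/MTE support:

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_BTI
#define PROT_BTI	0x10	/* arm64: guarded (BTI) executable mapping */
#endif
#ifndef PROT_MTE
#define PROT_MTE	0x20	/* arm64: Normal Tagged (MTE) mapping */
#endif

int main(void)
{
	/*
	 * Ask for a tagged anonymous mapping. On kernels/CPUs without MTE,
	 * arch_validate_prot() rejects PROT_MTE and mmap() fails with EINVAL.
	 */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE)");
		return 1;
	}
	printf("tagged mapping at %p\n", p);
	return munmap(p, 4096);
}

On arm64, arch_calc_vm_prot_bits() turns PROT_MTE into VM_MTE (and PROT_BTI
into VM_ARM64_BTI), which is what vm_get_page_prot() later inspects when
deriving the PTE attribute bits.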
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57c4c995965f..dd0b15162bb3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -45,6 +45,7 @@ config ARM64
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_ELF_PROT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index e3e28f7daf62..5966ee4a6154 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -35,30 +35,6 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 }
 #define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-	pteval_t prot = 0;
-
-	if (vm_flags & VM_ARM64_BTI)
-		prot |= PTE_GP;
-
-	/*
-	 * There are two conditions required for returning a Normal Tagged
-	 * memory type: (1) the user requested it via PROT_MTE passed to
-	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
-	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
-	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
-	 * mmap() call since mprotect() does not accept MAP_* flags.
-	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
-	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
-	 */
-	if (vm_flags & VM_MTE)
-		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
-
-	return __pgprot(prot);
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
 static inline bool arch_validate_prot(unsigned long prot,
 	unsigned long addr __always_unused)
 {
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 77ada00280d9..307534fcec00 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -55,3 +55,36 @@ static int __init adjust_protection_map(void)
 	return 0;
 }
 arch_initcall(adjust_protection_map);
+
+static pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
+{
+	pteval_t prot = 0;
+
+	if (vm_flags & VM_ARM64_BTI)
+		prot |= PTE_GP;
+
+	/*
+	 * There are two conditions required for returning a Normal Tagged
+	 * memory type: (1) the user requested it via PROT_MTE passed to
+	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
+	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
+	 * mmap() call since mprotect() does not accept MAP_* flags.
+	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
+	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+	 */
+	if (vm_flags & VM_MTE)
+		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+	return __pgprot(prot);
+}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
+			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+			pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
+
+	return ret;
+}
+EXPORT_SYMBOL(vm_get_page_prot);
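vm_get_page_prot() is exported above because the core mm, as well as modules
(for example drivers setting up mappings in their mmap handlers), derive a
VMA's page protection from its flags through this helper. A rough, simplified
sketch of that relationship follows; the helper name is made up for
illustration and is not actual mm code:

static void init_vma_prot(struct vm_area_struct *vma, unsigned long vm_flags)
{
	vma->vm_flags = vm_flags;
	/*
	 * With ARCH_HAS_VM_GET_PAGE_PROT selected, this resolves to the arm64
	 * definition above, so VM_MTE and VM_ARM64_BTI are folded into the
	 * PTE attributes (PTE_ATTRINDX(MT_NORMAL_TAGGED) and PTE_GP).
	 */
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
}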
This defines and exports a platform-specific vm_get_page_prot() by
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. It also localizes
arch_vm_get_page_prot() and moves it near vm_get_page_prot().

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/Kconfig            |  1 +
 arch/arm64/include/asm/mman.h | 24 ------------------------
 arch/arm64/mm/mmap.c          | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 24 deletions(-)
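Subscribing to ARCH_HAS_VM_GET_PAGE_PROT only takes effect because, elsewhere
in this series, the generic vm_get_page_prot() in mm/mmap.c is made
conditional on the new Kconfig symbol so that arm64's definition does not
clash with it. That other patch is not shown here, so the snippet below is
only a plausible shape of the generic fallback, stated as an assumption for
illustration rather than quoted from the series:

#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Default: look up the protection map with no arch-specific bits. */
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]));
}
EXPORT_SYMBOL(vm_get_page_prot);
#endif	/* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */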