From patchwork Mon Feb 28 10:47:27 2022
X-Patchwork-Submitter: Anshuman Khandual
X-Patchwork-Id: 12762764
From: Anshuman Khandual
To: linux-mm@kvack.org, akpm@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, geert@linux-m68k.org,
    Anshuman Khandual, Christoph Hellwig,
    linuxppc-dev@lists.ozlabs.org, linux-arm-kernel@lists.infradead.org,
    sparclinux@vger.kernel.org, linux-mips@vger.kernel.org,
    linux-m68k@lists.linux-m68k.org, linux-s390@vger.kernel.org,
    linux-riscv@lists.infradead.org, linux-alpha@vger.kernel.org,
    linux-sh@vger.kernel.org, linux-snps-arc@lists.infradead.org,
    linux-csky@vger.kernel.org, linux-xtensa@linux-xtensa.org,
    linux-parisc@vger.kernel.org, openrisc@lists.librecores.org,
    linux-um@lists.infradead.org, linux-hexagon@vger.kernel.org,
    linux-ia64@vger.kernel.org, linux-arch@vger.kernel.org,
    Michael Ellerman, Paul Mackerras
Subject: [PATCH V3 04/30] powerpc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
Date: Mon, 28 Feb 2022 16:17:27 +0530
Message-Id: <1646045273-9343-5-git-send-email-anshuman.khandual@arm.com>
In-Reply-To: <1646045273-9343-1-git-send-email-anshuman.khandual@arm.com>
References: <1646045273-9343-1-git-send-email-anshuman.khandual@arm.com>

This defines and exports a platform-specific custom vm_get_page_prot()
via subscribing ARCH_HAS_VM_GET_PAGE_PROT, after which all the
no-longer-needed __SXXX and __PXXX macros can be dropped. While here,
this also localizes arch_vm_get_page_prot() as powerpc_vm_get_page_prot()
and moves it near vm_get_page_prot().
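
[Not part of the commit message - context for reviewers: the generic
vm_get_page_prot() that this patch overrides on powerpc looks roughly
like the sketch below (from mm/mmap.c, reproduced from memory, so the
exact guard and hook names may differ slightly). It indexes a
protection_map[] table built from the __PXXX/__SXXX macros, which is
why those macros can go away once the architecture supplies its own
helper:]

	/* Rough sketch of the generic fallback; the series gates it off
	 * once an architecture selects ARCH_HAS_VM_GET_PAGE_PROT.
	 */
	#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
					(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
				pgprot_val(arch_vm_get_page_prot(vm_flags)));

		return arch_filter_pgprot(ret);
	}
	EXPORT_SYMBOL(vm_get_page_prot);
	#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */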
Cc: Michael Ellerman
Cc: Paul Mackerras
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Michael Ellerman (powerpc)
---
 arch/powerpc/Kconfig               |  1 +
 arch/powerpc/include/asm/mman.h    | 12 ------
 arch/powerpc/include/asm/pgtable.h | 19 ----------
 arch/powerpc/mm/mmap.c             | 59 ++++++++++++++++++++++++++++++
 4 files changed, 60 insertions(+), 31 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b779603978e1..ddb4a3687c05 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
 	select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7cb6d18f5cd6..1b024e64c8ec 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -24,18 +24,6 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef CONFIG_PPC_MEM_KEYS
-	return (vm_flags & VM_SAO) ?
-		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
-		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
-#else
-	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
-#endif
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
 static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
 {
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d564d0ecd4cd..3cbb6de20f9d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,25 +20,6 @@ struct mm_struct;
 #include
 #endif /* !CONFIG_PPC_BOOK3S */
 
-/* Note due to the way vm flags are laid out, the bits are XWR */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
-
 #ifndef __ASSEMBLY__
 
 #ifndef MAX_PTRS_PER_PGD
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index c475cf810aa8..ee275937fe19 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -254,3 +254,62 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
+
+static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return PAGE_NONE;
+	case VM_READ:
+		return PAGE_READONLY;
+	case VM_WRITE:
+	case VM_WRITE | VM_READ:
+		return PAGE_COPY;
+	case VM_EXEC:
+	case VM_EXEC | VM_READ:
+		return PAGE_READONLY_X;
+	case VM_EXEC | VM_WRITE:
+	case VM_EXEC | VM_WRITE | VM_READ:
+		return PAGE_COPY_X;
+	case VM_SHARED:
+		return PAGE_NONE;
+	case VM_SHARED | VM_READ:
+		return PAGE_READONLY;
+	case VM_SHARED | VM_WRITE:
+	case VM_SHARED | VM_WRITE | VM_READ:
+		return PAGE_SHARED;
+	case VM_SHARED | VM_EXEC:
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return PAGE_READONLY_X;
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+	case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+		return PAGE_SHARED_X;
+	default:
+		BUILD_BUG();
+	}
+}
+
+#ifdef CONFIG_PPC64
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (vm_flags & VM_SAO) ?
+		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
+}
+#else
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(0);
+}
+#endif /* CONFIG_PPC64 */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	return __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
+			pgprot_val(powerpc_vm_get_page_prot(vm_flags)));
+}
+EXPORT_SYMBOL(vm_get_page_prot);
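
[Again not part of the patch - a minimal, hypothetical usage sketch.
The exported vm_get_page_prot() is consumed by generic mm code when a
mapping's vm_flags are turned into the pgprot_t used for its PTEs; the
helper below only exists to illustrate that call and assumes
<linux/mm.h> for the declarations:]

	/* Hypothetical illustration: deriving a VMA's page protection
	 * from its flags, as mmap_region() and friends do.
	 */
	static void example_init_vma_prot(struct vm_area_struct *vma,
					  unsigned long vm_flags)
	{
		vma->vm_flags = vm_flags;
		/* Picks PAGE_NONE/PAGE_READONLY/PAGE_SHARED_X/... per the
		 * switch in __vm_get_page_prot() above, plus any SAO/pkey
		 * bits from powerpc_vm_get_page_prot().
		 */
		vma->vm_page_prot = vm_get_page_prot(vm_flags);
	}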