Message ID | 20240503130147.1154804-14-joey.gouly@arm.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | arm64: Permission Overlay Extension | expand |
On 5/3/24 18:31, Joey Gouly wrote: > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > value is set in the vm_flags and then into the pgprot value. > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > Cc: Catalin Marinas <catalin.marinas@arm.com> > Cc: Will Deacon <will@kernel.org> > --- > arch/arm64/include/asm/mman.h | 8 +++++++- > arch/arm64/mm/mmap.c | 9 +++++++++ > 2 files changed, 16 insertions(+), 1 deletion(-) > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > index 5966ee4a6154..ecb2d18dc4d7 100644 > --- a/arch/arm64/include/asm/mman.h > +++ b/arch/arm64/include/asm/mman.h > @@ -7,7 +7,7 @@ > #include <uapi/asm/mman.h> > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > - unsigned long pkey __always_unused) > + unsigned long pkey) > { > unsigned long ret = 0; > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > if (system_supports_mte() && (prot & PROT_MTE)) > ret |= VM_MTE; > > +#if defined(CONFIG_ARCH_HAS_PKEYS) Should there be system_supports_poe() check like above? Thanks, Amit > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > +#endif > + > return ret; > } > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) > diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c > index 642bdf908b22..86eda6bc7893 100644 > --- a/arch/arm64/mm/mmap.c > +++ b/arch/arm64/mm/mmap.c > @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) > if (vm_flags & VM_MTE) > prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); > > +#ifdef CONFIG_ARCH_HAS_PKEYS > + if (vm_flags & VM_PKEY_BIT0) > + prot |= PTE_PO_IDX_0; > + if (vm_flags & VM_PKEY_BIT1) > + prot |= PTE_PO_IDX_1; > + if (vm_flags & VM_PKEY_BIT2) > + prot |= PTE_PO_IDX_2; > +#endif > + > return __pgprot(prot); > } > EXPORT_SYMBOL(vm_get_page_prot);
On Tue, May 28, 2024 at 12:24:57PM +0530, Amit Daniel Kachhap wrote: > On 5/3/24 18:31, Joey Gouly wrote: > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > --- a/arch/arm64/include/asm/mman.h > > +++ b/arch/arm64/include/asm/mman.h > > @@ -7,7 +7,7 @@ > > #include <uapi/asm/mman.h> > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > - unsigned long pkey __always_unused) > > + unsigned long pkey) > > { > > unsigned long ret = 0; > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > if (system_supports_mte() && (prot & PROT_MTE)) > > ret |= VM_MTE; > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > Should there be system_supports_poe() check like above? I think it should, otherwise we end up with these bits in the pte even when POE is not supported. > > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > > +#endif > > + > > return ret; > > } > > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
Hi, On Wed, Jun 19, 2024 at 05:45:29PM +0100, Catalin Marinas wrote: > On Tue, May 28, 2024 at 12:24:57PM +0530, Amit Daniel Kachhap wrote: > > On 5/3/24 18:31, Joey Gouly wrote: > > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > > --- a/arch/arm64/include/asm/mman.h > > > +++ b/arch/arm64/include/asm/mman.h > > > @@ -7,7 +7,7 @@ > > > #include <uapi/asm/mman.h> > > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > - unsigned long pkey __always_unused) > > > + unsigned long pkey) > > > { > > > unsigned long ret = 0; > > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > if (system_supports_mte() && (prot & PROT_MTE)) > > > ret |= VM_MTE; > > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > > > Should there be system_supports_poe() check like above? > > I think it should, otherwise we end up with these bits in the pte even > when POE is not supported. I think it can't get here due to the flow of the code, but I will add it to be defensive (since it's just an alternative that gets patched). I still need the defined(CONFIG_ARCH_HAS_PKEYS) check, since the VM_PKEY_BIT* are only defined then. > > > > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > > > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > > > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > > > +#endif > > > + > > > return ret; > > > } > > > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) Thanks, Joey
On Thu, Jul 04, 2024 at 01:47:04PM +0100, Joey Gouly wrote: > On Wed, Jun 19, 2024 at 05:45:29PM +0100, Catalin Marinas wrote: > > On Tue, May 28, 2024 at 12:24:57PM +0530, Amit Daniel Kachhap wrote: > > > On 5/3/24 18:31, Joey Gouly wrote: > > > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > > > --- a/arch/arm64/include/asm/mman.h > > > > +++ b/arch/arm64/include/asm/mman.h > > > > @@ -7,7 +7,7 @@ > > > > #include <uapi/asm/mman.h> > > > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > > - unsigned long pkey __always_unused) > > > > + unsigned long pkey) > > > > { > > > > unsigned long ret = 0; > > > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > > if (system_supports_mte() && (prot & PROT_MTE)) > > > > ret |= VM_MTE; > > > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > > > > > Should there be system_supports_poe() check like above? > > > > I think it should, otherwise we end up with these bits in the pte even > > when POE is not supported. > > I think it can't get here due to the flow of the code, but I will add it to be > defensive (since it's just an alternative that gets patched). You are probably right, the mprotect_pkey() will reject the call if we don't support POE. So you could add a comment instead (but a system_supports_poe() check seems safer). > I still need the defined(CONFIG_ARCH_HAS_PKEYS) check, since the VM_PKEY_BIT* > are only defined then. Yes, the ifdef will stay.
On 5/3/24 18:31, Joey Gouly wrote: > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > value is set in the vm_flags and then into the pgprot value. > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > Cc: Catalin Marinas <catalin.marinas@arm.com> > Cc: Will Deacon <will@kernel.org> > --- > arch/arm64/include/asm/mman.h | 8 +++++++- > arch/arm64/mm/mmap.c | 9 +++++++++ > 2 files changed, 16 insertions(+), 1 deletion(-) > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > index 5966ee4a6154..ecb2d18dc4d7 100644 > --- a/arch/arm64/include/asm/mman.h > +++ b/arch/arm64/include/asm/mman.h > @@ -7,7 +7,7 @@ > #include <uapi/asm/mman.h> > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > - unsigned long pkey __always_unused) > + unsigned long pkey) > { > unsigned long ret = 0; > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > if (system_supports_mte() && (prot & PROT_MTE)) > ret |= VM_MTE; > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; 0x1, 0x2, 0x4 here are standard bit positions for their corresponding VM_PKEY_XXX based protection values ? Although this is similar to what x86 is doing currently, hence just trying to understand if these bit positions here are related to the user visible ABI, which should be standardized ? Agree with previous comments about the need for system_supports_poe() based additional check for the above code block. 
> +#endif > + > return ret; > } > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) > diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c > index 642bdf908b22..86eda6bc7893 100644 > --- a/arch/arm64/mm/mmap.c > +++ b/arch/arm64/mm/mmap.c > @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) > if (vm_flags & VM_MTE) > prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); > > +#ifdef CONFIG_ARCH_HAS_PKEYS > + if (vm_flags & VM_PKEY_BIT0) > + prot |= PTE_PO_IDX_0; > + if (vm_flags & VM_PKEY_BIT1) > + prot |= PTE_PO_IDX_1; > + if (vm_flags & VM_PKEY_BIT2) > + prot |= PTE_PO_IDX_2; > +#endif > + > return __pgprot(prot); > } > EXPORT_SYMBOL(vm_get_page_prot);
On Tue, Jul 16, 2024 at 02:35:48PM +0530, Anshuman Khandual wrote: > > > On 5/3/24 18:31, Joey Gouly wrote: > > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > > value is set in the vm_flags and then into the pgprot value. > > > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > > Cc: Catalin Marinas <catalin.marinas@arm.com> > > Cc: Will Deacon <will@kernel.org> > > --- > > arch/arm64/include/asm/mman.h | 8 +++++++- > > arch/arm64/mm/mmap.c | 9 +++++++++ > > 2 files changed, 16 insertions(+), 1 deletion(-) > > > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > --- a/arch/arm64/include/asm/mman.h > > +++ b/arch/arm64/include/asm/mman.h > > @@ -7,7 +7,7 @@ > > #include <uapi/asm/mman.h> > > > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > - unsigned long pkey __always_unused) > > + unsigned long pkey) > > { > > unsigned long ret = 0; > > > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > if (system_supports_mte() && (prot & PROT_MTE)) > > ret |= VM_MTE; > > > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > > 0x1, 0x2, 0x4 here are standard bit positions for their corresponding > VM_KEY_XXX based protection values ? Although this is similar to what > x86 is doing currently, hence just trying to understand if these bit > positions here are related to the user visible ABI, which should be > standardized ? The bit positions of VM_PKEY_BIT* aren't user visible. This is converting the value of the `pkey` that was passed to the mprotect, into the internal flags. I might replace those hex values with BIT(0), BIT(1), BIT(2), might be clearer. > > Agree with previous comments about the need for system_supports_poe() > based additional check for the above code block. 
> > > +#endif > > + > > return ret; > > } > > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) > > diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c > > index 642bdf908b22..86eda6bc7893 100644 > > --- a/arch/arm64/mm/mmap.c > > +++ b/arch/arm64/mm/mmap.c > > @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) > > if (vm_flags & VM_MTE) > > prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); > > > > +#ifdef CONFIG_ARCH_HAS_PKEYS > > + if (vm_flags & VM_PKEY_BIT0) > > + prot |= PTE_PO_IDX_0; > > + if (vm_flags & VM_PKEY_BIT1) > > + prot |= PTE_PO_IDX_1; > > + if (vm_flags & VM_PKEY_BIT2) > > + prot |= PTE_PO_IDX_2; > > +#endif > > + > > return __pgprot(prot); > > } > > EXPORT_SYMBOL(vm_get_page_prot); > Thanks, Joey
On Fri, May 03, 2024 at 02:01:31PM +0100, Joey Gouly wrote: > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > value is set in the vm_flags and then into the pgprot value. > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > Cc: Catalin Marinas <catalin.marinas@arm.com> > Cc: Will Deacon <will@kernel.org> > --- > arch/arm64/include/asm/mman.h | 8 +++++++- > arch/arm64/mm/mmap.c | 9 +++++++++ > 2 files changed, 16 insertions(+), 1 deletion(-) > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > index 5966ee4a6154..ecb2d18dc4d7 100644 > --- a/arch/arm64/include/asm/mman.h > +++ b/arch/arm64/include/asm/mman.h > @@ -7,7 +7,7 @@ > #include <uapi/asm/mman.h> > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > - unsigned long pkey __always_unused) > + unsigned long pkey) > { > unsigned long ret = 0; > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > if (system_supports_mte() && (prot & PROT_MTE)) > ret |= VM_MTE; > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; Out of interest, is this as bad as it looks or does the compiler turn it into a shift and mask? > +#endif > + > return ret; > } > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) > diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c > index 642bdf908b22..86eda6bc7893 100644 > --- a/arch/arm64/mm/mmap.c > +++ b/arch/arm64/mm/mmap.c > @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) > if (vm_flags & VM_MTE) > prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); > > +#ifdef CONFIG_ARCH_HAS_PKEYS > + if (vm_flags & VM_PKEY_BIT0) > + prot |= PTE_PO_IDX_0; > + if (vm_flags & VM_PKEY_BIT1) > + prot |= PTE_PO_IDX_1; > + if (vm_flags & VM_PKEY_BIT2) > + prot |= PTE_PO_IDX_2; > +#endif > + Ditto. 
At least we only have three bits to cope with either way. I'm guessing that these functions are not a super-hot path. [...] Cheers ---Dave
On Thu, Jul 25, 2024 at 04:49:50PM +0100, Dave Martin wrote: > On Fri, May 03, 2024 at 02:01:31PM +0100, Joey Gouly wrote: > > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > > value is set in the vm_flags and then into the pgprot value. > > > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > > Cc: Catalin Marinas <catalin.marinas@arm.com> > > Cc: Will Deacon <will@kernel.org> > > --- > > arch/arm64/include/asm/mman.h | 8 +++++++- > > arch/arm64/mm/mmap.c | 9 +++++++++ > > 2 files changed, 16 insertions(+), 1 deletion(-) > > > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > --- a/arch/arm64/include/asm/mman.h > > +++ b/arch/arm64/include/asm/mman.h > > @@ -7,7 +7,7 @@ > > #include <uapi/asm/mman.h> > > > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > - unsigned long pkey __always_unused) > > + unsigned long pkey) > > { > > unsigned long ret = 0; > > > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > if (system_supports_mte() && (prot & PROT_MTE)) > > ret |= VM_MTE; > > > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > > Out of interest, is this as bad as it looks or does the compiler turn > it into a shift and mask? 
Yeah, (gcc 13.2) produces good code here (this is do_mprotect_pkey after removing a lot of branching): and w0, w0, #0x7 orr x1, x1, x0, lsl #32 > > > > +#endif > > + > > return ret; > > } > > #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) > > diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c > > index 642bdf908b22..86eda6bc7893 100644 > > --- a/arch/arm64/mm/mmap.c > > +++ b/arch/arm64/mm/mmap.c > > @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) > > if (vm_flags & VM_MTE) > > prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); > > > > +#ifdef CONFIG_ARCH_HAS_PKEYS > > + if (vm_flags & VM_PKEY_BIT0) > > + prot |= PTE_PO_IDX_0; > > + if (vm_flags & VM_PKEY_BIT1) > > + prot |= PTE_PO_IDX_1; > > + if (vm_flags & VM_PKEY_BIT2) > > + prot |= PTE_PO_IDX_2; > > +#endif > > + > > Ditto. At least we only have three bits to cope with either way. > > I'm guessing that these functions are not super-hot path. > > [...] > > Cheers > ---Dave
On Thu, Aug 01, 2024 at 11:55:02AM +0100, Joey Gouly wrote: > On Thu, Jul 25, 2024 at 04:49:50PM +0100, Dave Martin wrote: > > On Fri, May 03, 2024 at 02:01:31PM +0100, Joey Gouly wrote: > > > Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey > > > value is set in the vm_flags and then into the pgprot value. > > > > > > Signed-off-by: Joey Gouly <joey.gouly@arm.com> > > > Cc: Catalin Marinas <catalin.marinas@arm.com> > > > Cc: Will Deacon <will@kernel.org> > > > --- > > > arch/arm64/include/asm/mman.h | 8 +++++++- > > > arch/arm64/mm/mmap.c | 9 +++++++++ > > > 2 files changed, 16 insertions(+), 1 deletion(-) > > > > > > diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h > > > index 5966ee4a6154..ecb2d18dc4d7 100644 > > > --- a/arch/arm64/include/asm/mman.h > > > +++ b/arch/arm64/include/asm/mman.h > > > @@ -7,7 +7,7 @@ > > > #include <uapi/asm/mman.h> > > > > > > static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > - unsigned long pkey __always_unused) > > > + unsigned long pkey) > > > { > > > unsigned long ret = 0; > > > > > > @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, > > > if (system_supports_mte() && (prot & PROT_MTE)) > > > ret |= VM_MTE; > > > > > > +#if defined(CONFIG_ARCH_HAS_PKEYS) > > > + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; > > > + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; > > > + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; > > > > Out of interest, is this as bad as it looks or does the compiler turn > > it into a shift and mask? > > Yeah, (gcc 13.2) produces good code here (this is do_mprotect_pkey after removing a lot of branching): > > and w0, w0, #0x7 > orr x1, x1, x0, lsl #32 Neat, good ol' gcc! Cheers ---Dave
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 5966ee4a6154..ecb2d18dc4d7 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -7,7 +7,7 @@ #include <uapi/asm/mman.h> static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, - unsigned long pkey __always_unused) + unsigned long pkey) { unsigned long ret = 0; @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, if (system_supports_mte() && (prot & PROT_MTE)) ret |= VM_MTE; +#if defined(CONFIG_ARCH_HAS_PKEYS) + ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0; + ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0; + ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0; +#endif + return ret; } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 642bdf908b22..86eda6bc7893 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -102,6 +102,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) if (vm_flags & VM_MTE) prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); +#ifdef CONFIG_ARCH_HAS_PKEYS + if (vm_flags & VM_PKEY_BIT0) + prot |= PTE_PO_IDX_0; + if (vm_flags & VM_PKEY_BIT1) + prot |= PTE_PO_IDX_1; + if (vm_flags & VM_PKEY_BIT2) + prot |= PTE_PO_IDX_2; +#endif + return __pgprot(prot); } EXPORT_SYMBOL(vm_get_page_prot);
Modify arch_calc_vm_prot_bits() and vm_get_page_prot() such that the pkey value is set in the vm_flags and then into the pgprot value. Signed-off-by: Joey Gouly <joey.gouly@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org> --- arch/arm64/include/asm/mman.h | 8 +++++++- arch/arm64/mm/mmap.c | 9 +++++++++ 2 files changed, 16 insertions(+), 1 deletion(-)