Message ID | 20250224225246.3712295-5-jeffxu@google.com |
---|---|
State | New |
Series | mseal system mappings |
On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> From: Jeff Xu <jeffxu@chromium.org>
>
> Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> the vdso, vvar, and compat-mode vectors and sigpage mappings.
>
> Production release testing passes on Android and Chrome OS.

This is pretty limited (yes yes I know android is massive etc. but we must
account for all the weird and wonderful arm64 devices out there in context of
upstream :)

Have you looking through all arm64-code relating to vdso, vvar, compat-mode
vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?

Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
nervous of that.

At any rate some comment about having checked/confirmed this would be good,
arm concerns me a lot more than x86 on this front.

Thanks however for doing extensive testing android/chrome side! This is of
course, very important for sheer volume (and probably worldwide % of deployed
arm64 devices...)

Just need to dot our i's and cross our t's...

>
> Signed-off-by: Jeff Xu <jeffxu@chromium.org>
> ---
>  arch/arm64/Kconfig       |  1 +
>  arch/arm64/kernel/vdso.c | 22 +++++++++++++++-------
>  2 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index fcdd0ed3eca8..39202aa9a5af 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -38,6 +38,7 @@ config ARM64
>  	select ARCH_HAS_KEEPINITRD
>  	select ARCH_HAS_MEMBARRIER_SYNC_CORE
>  	select ARCH_HAS_MEM_ENCRYPT
> +	select ARCH_HAS_MSEAL_SYSTEM_MAPPINGS
>  	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
>  	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
>  	select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index e8ed8e5b713b..12e6ab396018 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -183,6 +183,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  {
>  	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
>  	unsigned long gp_flags = 0;
> +	unsigned long vm_flags;
>  	void *ret;
>
>  	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
> @@ -197,8 +198,10 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  		goto up_fail;
>  	}
>
> +	vm_flags = VM_READ|VM_MAYREAD|VM_PFNMAP;
> +	vm_flags |= VM_SEALED_SYSMAP;
>  	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
> -				       VM_READ|VM_MAYREAD|VM_PFNMAP,
> +				       vm_flags,
>  				       &vvar_map);
>  	if (IS_ERR(ret))
>  		goto up_fail;
> @@ -208,9 +211,10 @@ static int __setup_additional_pages(enum vdso_abi abi,
>
>  	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
>  	mm->context.vdso = (void *)vdso_base;
> +	vm_flags = VM_READ|VM_EXEC|gp_flags|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
> +	vm_flags |= VM_SEALED_SYSMAP;
>  	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
> -				       VM_READ|VM_EXEC|gp_flags|
> -				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
> +				       vm_flags,
>  				       vdso_info[abi].cm);
>  	if (IS_ERR(ret))
>  		goto up_fail;
> @@ -326,6 +330,7 @@ arch_initcall(aarch32_alloc_vdso_pages);
>  static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  {
>  	void *ret;
> +	unsigned long vm_flags;
>
>  	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
>  		return 0;
> @@ -334,9 +339,10 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
>  	 * not safe to CoW the page containing the CPU exception vectors.
>  	 */
> +	vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC;
> +	vm_flags |= VM_SEALED_SYSMAP;
>  	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
> -				       VM_READ | VM_EXEC |
> -				       VM_MAYREAD | VM_MAYEXEC,
> +				       vm_flags,
>  				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);
>
>  	return PTR_ERR_OR_ZERO(ret);
> @@ -345,6 +351,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  static int aarch32_sigreturn_setup(struct mm_struct *mm)
>  {
>  	unsigned long addr;
> +	unsigned long vm_flags;
>  	void *ret;
>
>  	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
> @@ -357,9 +364,10 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
>  	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
>  	 * set breakpoints.
>  	 */
> +	vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
> +	vm_flags |= VM_SEALED_SYSMAP;
>  	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
> -				       VM_READ | VM_EXEC | VM_MAYREAD |
> -				       VM_MAYWRITE | VM_MAYEXEC,
> +				       vm_flags,
>  				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
>  	if (IS_ERR(ret))
>  		goto out;
> --
> 2.48.1.658.g4767266eb4-goog
>

Patch looks fine for purposes of what you're trying to achieve though, just
need to have some calming reassurances about arch :)

Thanks!
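[Editorial note] For context when reading the patch above: it ORs VM_SEALED_SYSMAP into vm_flags unconditionally, which only works because the core part of the mseal series is expected to define the flag so that it collapses to nothing when the feature is disabled. A minimal sketch of that arrangement follows; the macro layout is an assumption based on the flag name used in this patch, not a quote of the core patch.

/*
 * Sketch only: assumes the core mseal series defines the helper flag
 * roughly like this in the common mm headers. With
 * CONFIG_MSEAL_SYSTEM_MAPPINGS=n the flag is VM_NONE, so arch code such
 * as the arm64 hunks above can OR it in without any #ifdef.
 */
#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
# define VM_SEALED_SYSMAP	VM_SEALED
#else
# define VM_SEALED_SYSMAP	VM_NONE
#endif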
On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > From: Jeff Xu <jeffxu@chromium.org>
> >
> > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> >
> > Production release testing passes on Android and Chrome OS.
>
> This is pretty limited (yes yes I know android is massive etc. but we must
> account for all the weird and wonderful arm64 devices out there in context of
> upstream :)
>
> Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> nervous of that.
>
Can you please point out where PPC munmap/mremap the vdso ?

Previously, when you mentioned that, I thought you meant user space in
PPC, I didn't realize that you meant that kernel code in PPC. I
tried, but didn't find anything, hence asking.

Thanks.
-Jeff
On Tue, Feb 25, 2025 at 02:26:50PM -0800, Jeff Xu wrote:
> On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> >
> > On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > > From: Jeff Xu <jeffxu@chromium.org>
> > >
> > > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> > >
> > > Production release testing passes on Android and Chrome OS.
> >
> > This is pretty limited (yes yes I know android is massive etc. but we must
> > account for all the weird and wonderful arm64 devices out there in context of
> > upstream :)
> >
> > Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> > vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> > Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> > nervous of that.
> >
> Can you please point out where PPC munmap/mremap the vdso ?
>
> Previously, when you mentioned that, I thought you meant user space in
> PPC, I didn't realize that you meant that kernel code in PPC. I
> tried, but didn't find anything, hence asking.

Jeff, please stick to replying to review. 'Have you looking through all
arm64-code'.

I ended up doing this myself yesterday and found no issues, as with x86-64.

I said I'm _pretty sure_ PPC does this. Liam mentioned something about
it. We can discuss it, and I can find specifics if + when you try to add
this to PPC.

Please try to respect my time...

> Thanks.
> -Jeff
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250226 00:26]:
> On Tue, Feb 25, 2025 at 02:26:50PM -0800, Jeff Xu wrote:
> > On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
> > <lorenzo.stoakes@oracle.com> wrote:
> > >
> > > On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > > > From: Jeff Xu <jeffxu@chromium.org>
> > > >
> > > > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > > > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> > > >
> > > > Production release testing passes on Android and Chrome OS.
> > >
> > > This is pretty limited (yes yes I know android is massive etc. but we must
> > > account for all the weird and wonderful arm64 devices out there in context of
> > > upstream :)
> > >
> > > Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> > > vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> > > Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> > > nervous of that.
> > >
> > Can you please point out where PPC munmap/mremap the vdso ?
> >
> > Previously, when you mentioned that, I thought you meant user space in
> > PPC, I didn't realize that you meant that kernel code in PPC. I
> > tried, but didn't find anything, hence asking.
>
> Jeff, please stick to replying to review. 'Have you looking through all
> arm64-code'.
>
> I ended up doing this myself yesterday and found no issues, as with x86-64.
>
> I said I'm _pretty sure_ PPC does this. Liam mentioned something about
> it. We can discuss it, and I can find specifics if + when you try to add
> this to PPC.
>

PPC allows the vma to be munmapped then detects and falls back to the
slower method, iirc.

They were against the removal of the fallback; other archs also have
this infrastructure. Really, if we fixed the fallback to work for
all platforms then it would probably also remove the possibility of a
remap over the VDSO being a problem (if it is today, which still isn't
clear?).

Thanks,
Liam
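[Editorial note] To make the "falls back to the slower method" point concrete: on architectures that tolerate an unmapped vdso, signal delivery typically checks whether the per-mm vdso pointer is still valid and, if not, writes a sigreturn trampoline onto the user stack instead of returning through the vdso. The sketch below is illustrative only; the helper and symbol names (pick_sigreturn_trampoline, vdso_sigtramp_offset, sigtramp_insns) are invented for this example and are not the powerpc code.

/*
 * Illustrative only: the general shape of the fallback described above.
 * All names here are hypothetical, not taken from arch/powerpc.
 */
static unsigned long pick_sigreturn_trampoline(struct mm_struct *mm,
					       void __user *stack)
{
	/* Fast path: the vdso is still mapped, return into its trampoline. */
	if (mm->context.vdso)
		return (unsigned long)mm->context.vdso + vdso_sigtramp_offset;

	/* Slow path: copy the trampoline instructions onto the user stack. */
	if (copy_to_user(stack, sigtramp_insns, sizeof(sigtramp_insns)))
		return 0;
	flush_icache_range((unsigned long)stack,
			   (unsigned long)stack + sizeof(sigtramp_insns));
	return (unsigned long)stack;
}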
On Wed, Feb 26, 2025 at 9:12 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> * Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250226 00:26]:
> > On Tue, Feb 25, 2025 at 02:26:50PM -0800, Jeff Xu wrote:
> > > On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
> > > <lorenzo.stoakes@oracle.com> wrote:
> > > >
> > > > On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > > > > From: Jeff Xu <jeffxu@chromium.org>
> > > > >
> > > > > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > > > > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> > > > >
> > > > > Production release testing passes on Android and Chrome OS.
> > > >
> > > > This is pretty limited (yes yes I know android is massive etc. but we must
> > > > account for all the weird and wonderful arm64 devices out there in context of
> > > > upstream :)
> > > >
> > > > Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> > > > vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> > > > Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> > > > nervous of that.
> > > >
> > > Can you please point out where PPC munmap/mremap the vdso ?
> > >
> > > Previously, when you mentioned that, I thought you meant user space in
> > > PPC, I didn't realize that you meant that kernel code in PPC. I
> > > tried, but didn't find anything, hence asking.
> >
> > Jeff, please stick to replying to review. 'Have you looking through all
> > arm64-code'.
> >
> > I ended up doing this myself yesterday and found no issues, as with x86-64.
> >
> > I said I'm _pretty sure_ PPC does this. Liam mentioned something about
> > it. We can discuss it, and I can find specifics if + when you try to add
> > this to PPC.
> >
>
> PPC allows the vma to be munmapped then detects and falls back to the
> slower method, iirc.
>
Is this code in the kernel or userspace?

If PPC doesn't want to create vdso for all its userspace apps, we
could instead "don't create" vdso during the execve call.

> They were against the removal of the fallback; other archs also have
> this infrastructure. Really, if we fixed the fallback to work for
> all platforms then it would probably also remove the possibility of a
> remap over the VDSO being a problem (if it is today, which still isn't
> clear?).
>
Any past thread/communication about this that I can read ?

Thanks
-Jeff

> Thanks,
> Liam
On Wed, Feb 26, 2025 at 09:17:10AM -0800, Jeff Xu wrote:
> On Wed, Feb 26, 2025 at 9:12 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
> >
> > * Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250226 00:26]:
> > > On Tue, Feb 25, 2025 at 02:26:50PM -0800, Jeff Xu wrote:
> > > > On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
> > > > <lorenzo.stoakes@oracle.com> wrote:
> > > > >
> > > > > On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > > > > > From: Jeff Xu <jeffxu@chromium.org>
> > > > > >
> > > > > > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > > > > > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> > > > > >
> > > > > > Production release testing passes on Android and Chrome OS.
> > > > >
> > > > > This is pretty limited (yes yes I know android is massive etc. but we must
> > > > > account for all the weird and wonderful arm64 devices out there in context of
> > > > > upstream :)
> > > > >
> > > > > Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> > > > > vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> > > > > Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> > > > > nervous of that.
> > > > >
> > > > Can you please point out where PPC munmap/mremap the vdso ?
> > > >
> > > > Previously, when you mentioned that, I thought you meant user space in
> > > > PPC, I didn't realize that you meant that kernel code in PPC. I
> > > > tried, but didn't find anything, hence asking.
> > >
> > > Jeff, please stick to replying to review. 'Have you looking through all
> > > arm64-code'.
> > >
> > > I ended up doing this myself yesterday and found no issues, as with x86-64.
> > >
> > > I said I'm _pretty sure_ PPC does this. Liam mentioned something about
> > > it. We can discuss it, and I can find specifics if + when you try to add
> > > this to PPC.
> > >
> >
> > PPC allows the vma to be munmapped then detects and falls back to the
> > slower method, iirc.
> >
> Is this code in the kernel or userspace?
>
> If PPC doesn't want to create vdso for all its userspace apps, we
> could instead "don't create" vdso during the execve call.
>
> > They were against the removal of the fallback; other archs also have
> > this infrastructure. Really, if we fixed the fallback to work for
> > all platforms then it would probably also remove the possibility of a
> > remap over the VDSO being a problem (if it is today, which still isn't
> > clear?).
> >
> Any past thread/communication about this that I can read ?

Jeff, I'm sure you don't intend to, but I find it quite disrespectful that you
ignored my feedback here (and elsewhere, regarding you ignoring 4 sets of
feedback).

This?

https://elixir.bootlin.com/linux/v6.13.4/source/arch/powerpc/kernel/vdso.c#L236

Was [0] a relevant discussion?

[0]: https://lore.kernel.org/all/lhe2mky6ahlk2jzvvfjyongqiseelyx2uy7sbyuso6jcy3b2dq@7ju6cea62jgk/

> Thanks
> -Jeff
>
> > Thanks,
> > Liam
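[Editorial note] For anyone skimming the thread: the linked powerpc code is, roughly, a ->close() hook on the vdso's vm_special_mapping that clears mm->context.vdso when the VMA goes away, which is what lets later code detect the unmap and fall back. The sketch below is a from-memory paraphrase of that shape, not a verbatim copy of the linked source; check the link itself rather than trusting this.

/* Rough shape of the powerpc hook (paraphrased from memory, not verbatim). */
static void vdso_close(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Forget the vdso mapping; users of context.vdso must cope with NULL. */
	mm->context.vdso = NULL;
}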
On Wed, Feb 26, 2025 at 05:43:22PM +0000, Lorenzo Stoakes wrote:
> On Wed, Feb 26, 2025 at 09:17:10AM -0800, Jeff Xu wrote:
> > On Wed, Feb 26, 2025 at 9:12 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
> > >
> > > * Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250226 00:26]:
> > > > On Tue, Feb 25, 2025 at 02:26:50PM -0800, Jeff Xu wrote:
> > > > > On Mon, Feb 24, 2025 at 10:20 PM Lorenzo Stoakes
> > > > > <lorenzo.stoakes@oracle.com> wrote:
> > > > > >
> > > > > > On Mon, Feb 24, 2025 at 10:52:43PM +0000, jeffxu@chromium.org wrote:
> > > > > > > From: Jeff Xu <jeffxu@chromium.org>
> > > > > > >
> > > > > > > Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> > > > > > > the vdso, vvar, and compat-mode vectors and sigpage mappings.
> > > > > > >
> > > > > > > Production release testing passes on Android and Chrome OS.
> > > > > >
> > > > > > This is pretty limited (yes yes I know android is massive etc. but we must
> > > > > > account for all the weird and wonderful arm64 devices out there in context of
> > > > > > upstream :)
> > > > > >
> > > > > > Have you looking through all arm64-code relating to vdso, vvar, compat-mode
> > > > > > vectors, sigpage mapping and ensured nothing kernel-side relies upon relocation?
> > > > > > Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
> > > > > > nervous of that.
> > > > > >
> > > > > Can you please point out where PPC munmap/mremap the vdso ?
> > > > >
> > > > > Previously, when you mentioned that, I thought you meant user space in
> > > > > PPC, I didn't realize that you meant that kernel code in PPC. I
> > > > > tried, but didn't find anything, hence asking.
> > > >
> > > > Jeff, please stick to replying to review. 'Have you looking through all
> > > > arm64-code'.
> > > >
> > > > I ended up doing this myself yesterday and found no issues, as with x86-64.
> > > >
> > > > I said I'm _pretty sure_ PPC does this. Liam mentioned something about
> > > > it. We can discuss it, and I can find specifics if + when you try to add
> > > > this to PPC.
> > > >
> > >
> > > PPC allows the vma to be munmapped then detects and falls back to the
> > > slower method, iirc.
> > >
> > Is this code in the kernel or userspace?
> >
> > If PPC doesn't want to create vdso for all its userspace apps, we
> > could instead "don't create" vdso during the execve call.
> >
> > > They were against the removal of the fallback; other archs also have
> > > this infrastructure. Really, if we fixed the fallback to work for
> > > all platforms then it would probably also remove the possibility of a
> > > remap over the VDSO being a problem (if it is today, which still isn't
> > > clear?).
> > >
> > Any past thread/communication about this that I can read ?
>
> Jeff, I'm sure you don't intend to, but I find it quite disrespectful that you
> ignored my feedback here (and elsewhere, regarding you ignoring 4 sets of
> feedback).

Apologies, I meant to reword this to sound less harsh but somebody phoned me
and I hit send...

What I mean to say is I think you _do_ ack what I've said here, but you think
it's not useful to reply because there's not really a conversation to be had.
Whereas I'm saying it'd be useful to ack :)

Sorry I did not mean for this to sound quite so 'full on'.

> This?
>
> https://elixir.bootlin.com/linux/v6.13.4/source/arch/powerpc/kernel/vdso.c#L236
>
> Was [0] a relevant discussion?
>
> [0]: https://lore.kernel.org/all/lhe2mky6ahlk2jzvvfjyongqiseelyx2uy7sbyuso6jcy3b2dq@7ju6cea62jgk/

I did in the end go and check this, so hopefully this is useful at least.

But again, I really think we should hold off on PPC stuff until we come to it
and focus on getting this series into mergeable state. Am doing my best to try
to get you there ASAP as there's been a lot of delays here.

> > Thanks
> > -Jeff
> >
> > > Thanks,
> > > Liam

Cheers, Lorenzo
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fcdd0ed3eca8..39202aa9a5af 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -38,6 +38,7 @@ config ARM64
 	select ARCH_HAS_KEEPINITRD
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_MEM_ENCRYPT
+	select ARCH_HAS_MSEAL_SYSTEM_MAPPINGS
 	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index e8ed8e5b713b..12e6ab396018 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -183,6 +183,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
 {
 	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
 	unsigned long gp_flags = 0;
+	unsigned long vm_flags;
 	void *ret;

 	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
@@ -197,8 +198,10 @@ static int __setup_additional_pages(enum vdso_abi abi,
 		goto up_fail;
 	}

+	vm_flags = VM_READ|VM_MAYREAD|VM_PFNMAP;
+	vm_flags |= VM_SEALED_SYSMAP;
 	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
-				       VM_READ|VM_MAYREAD|VM_PFNMAP,
+				       vm_flags,
 				       &vvar_map);
 	if (IS_ERR(ret))
 		goto up_fail;
@@ -208,9 +211,10 @@ static int __setup_additional_pages(enum vdso_abi abi,

 	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
 	mm->context.vdso = (void *)vdso_base;
+	vm_flags = VM_READ|VM_EXEC|gp_flags|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
+	vm_flags |= VM_SEALED_SYSMAP;
 	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|gp_flags|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       vm_flags,
 				       vdso_info[abi].cm);
 	if (IS_ERR(ret))
 		goto up_fail;
@@ -326,6 +330,7 @@ arch_initcall(aarch32_alloc_vdso_pages);
 static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
 {
 	void *ret;
+	unsigned long vm_flags;

 	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
 		return 0;
@@ -334,9 +339,10 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
 	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
 	 * not safe to CoW the page containing the CPU exception vectors.
 	 */
+	vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC;
+	vm_flags |= VM_SEALED_SYSMAP;
 	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
-				       VM_READ | VM_EXEC |
-				       VM_MAYREAD | VM_MAYEXEC,
+				       vm_flags,
 				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

 	return PTR_ERR_OR_ZERO(ret);
@@ -345,6 +351,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
 	unsigned long addr;
+	unsigned long vm_flags;
 	void *ret;

 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -357,9 +364,10 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
 	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
 	 * set breakpoints.
 	 */
+	vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
+	vm_flags |= VM_SEALED_SYSMAP;
 	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
-				       VM_READ | VM_EXEC | VM_MAYREAD |
-				       VM_MAYWRITE | VM_MAYEXEC,
+				       vm_flags,
 				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
 	if (IS_ERR(ret))
 		goto out;
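[Editorial note] A quick userspace check one might run on an arm64 kernel with this patch applied and CONFIG_MSEAL_SYSTEM_MAPPINGS=y: attempting to unmap the [vdso] region should now fail, since sealed VMAs reject munmap with EPERM under the documented mseal() semantics. This is an illustrative smoke test, not part of the series' selftests.

/*
 * Illustrative smoke test: find our own [vdso] mapping and try to unmap it.
 * On a kernel with sealed system mappings this should fail with EPERM.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[512];
	unsigned long start = 0, end = 0;

	if (!maps)
		return 1;

	/* Locate the [vdso] line in our own address space. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]")) {
			sscanf(line, "%lx-%lx", &start, &end);
			break;
		}
	}
	fclose(maps);

	if (!start) {
		puts("no [vdso] mapping found");
		return 1;
	}

	if (munmap((void *)start, end - start) == -1 && errno == EPERM)
		puts("vdso is sealed: munmap failed with EPERM as expected");
	else
		puts("vdso was NOT sealed (munmap succeeded or failed differently)");

	return 0;
}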