[5/6] arm64: vdso32: Remove a bunch of #ifdef CONFIG_COMPAT_VDSO guards

Message ID 20200623085436.3696-6-will@kernel.org
State New, archived
Series Fix unwinding through sigreturn trampolines

Commit Message

Will Deacon June 23, 2020, 8:54 a.m. UTC
Most of the compat vDSO code can be built and guarded using IS_ENABLED,
so drop the unnecessary #ifdefs.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/vdso.c | 44 ++++++++++++++++------------------------
 1 file changed, 17 insertions(+), 27 deletions(-)
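
The cleanup relies on the standard kernel IS_ENABLED() idiom: the macro expands to a compile-time 1 or 0, so a plain C "if" can replace an #ifdef, provided every identifier in the guarded code stays declared in all configurations; that is also why the declarations of vdso32_start[]/vdso32_end[] and the VDSO_ABI_AA32 enumerator lose their guards below. A minimal standalone sketch of the pattern, using a hypothetical CONFIG_FOO option and foo_*() helpers rather than code from this patch:

/* Sketch only: hypothetical CONFIG_FOO option, not from this patch. */
#include <linux/kconfig.h>	/* IS_ENABLED() */

static int foo_vdso_setup(void)	/* hypothetical helper */
{
	/* ...allocate and map the optional pages... */
	return 0;
}

static int foo_setup(void)
{
	/*
	 * Both branches are always parsed and type-checked; with
	 * CONFIG_FOO=n this reads "if (0)" and the compiler discards
	 * the dead branch, so no #ifdef is needed.
	 */
	if (IS_ENABLED(CONFIG_FOO))
		return foo_vdso_setup();

	return 0;
}

Compared with #ifdef, the disabled branch still gets compile-time checking, so configuration-dependent bitrot is caught even in builds where the option is off.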

Comments

Mark Rutland June 23, 2020, 9:10 a.m. UTC | #1
On Tue, Jun 23, 2020 at 09:54:35AM +0100, Will Deacon wrote:
> Most of the compat vDSO code can be built and guarded using IS_ENABLED,
> so drop the unnecessary #ifdefs.
> 
> Signed-off-by: Will Deacon <will@kernel.org>

I'd considered doing this when I did the other cleanup, but I avoided
this only because of the AA32 sigpage being mutually exclusive with the
full AA32 VDSO, and not seeing a nice way to enforce that otherwise.

So assuming the prior patch removing that is valid, this patch looks
entirely sane to me, and with that assumption:

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

Mark Rutland June 23, 2020, 9:17 a.m. UTC | #2
On Tue, Jun 23, 2020 at 10:10:42AM +0100, Mark Rutland wrote:
> On Tue, Jun 23, 2020 at 09:54:35AM +0100, Will Deacon wrote:
> > Most of the compat vDSO code can be built and guarded using IS_ENABLED,
> > so drop the unnecessary #ifdefs.
> > 
> > Signed-off-by: Will Deacon <will@kernel.org>
> 
> I'd considered doing this when I did the other cleanup, but I avoided
> this only because of the AA32 sigpage being mutually exclusive with the
> full AA32 VDSO, and not seeing a nice way to enforce that otherwise.
> 
> So assuming the prior patch removing that is valid, this patch looks
> entirely sane to me, and with that assumption:

... and now I see the prior patches are cleanup to ensure that's true.
Lemme go review the whole thing in-order...

Mark.

Mark Rutland June 23, 2020, 10:37 a.m. UTC | #3
On Tue, Jun 23, 2020 at 10:10:42AM +0100, Mark Rutland wrote:
> On Tue, Jun 23, 2020 at 09:54:35AM +0100, Will Deacon wrote:
> > Most of the compat vDSO code can be built and guarded using IS_ENABLED,
> > so drop the unnecessary #ifdefs.
> > 
> > Signed-off-by: Will Deacon <will@kernel.org>
> 
> I'd considered doing this when I did the other cleanup, but I avoided
> this only because of the AA32 sigpage being mutually exclusive with the
> full AA32 VDSO, and not seeing a nice way to enforce that otherwise.
> 
> So assuming the prior patch removing that is valid, this patch looks
> entirely sane to me, and with that assumption:
> 
> Acked-by: Mark Rutland <mark.rutland@arm.com>

Now that I've gone through the series I see that holds and the result
looks sound. Please upgrade that to:

Reviewed-by: Mark Rutland <mark.rutland@arm.com>

Mark.

Vincenzo Frascino June 23, 2020, 11:55 a.m. UTC | #4
On 6/23/20 9:54 AM, Will Deacon wrote:
> Most of the compat vDSO code can be built and guarded using IS_ENABLED,
> so drop the unnecessary #ifdefs.
> 
> Signed-off-by: Will Deacon <will@kernel.org>

Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>


Patch

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index e546df0efefb..30b01c2f50da 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -29,15 +29,11 @@ 
 #include <asm/vdso.h>
 
 extern char vdso_start[], vdso_end[];
-#ifdef CONFIG_COMPAT_VDSO
 extern char vdso32_start[], vdso32_end[];
-#endif /* CONFIG_COMPAT_VDSO */
 
 enum vdso_abi {
 	VDSO_ABI_AA64,
-#ifdef CONFIG_COMPAT_VDSO
 	VDSO_ABI_AA32,
-#endif /* CONFIG_COMPAT_VDSO */
 };
 
 struct vdso_abi_info {
@@ -178,21 +174,17 @@  static int __setup_additional_pages(enum vdso_abi abi,
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
-#ifdef CONFIG_COMPAT_VDSO
 static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
 		struct vm_area_struct *new_vma)
 {
 	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
 }
-#endif /* CONFIG_COMPAT_VDSO */
 
 enum aarch32_map {
 	AA32_MAP_VECTORS, /* kuser helpers */
-#ifdef CONFIG_COMPAT_VDSO
+	AA32_MAP_SIGPAGE,
 	AA32_MAP_VVAR,
 	AA32_MAP_VDSO,
-#endif
-	AA32_MAP_SIGPAGE
 };
 
 static struct page *aarch32_vectors_page __ro_after_init;
@@ -203,7 +195,10 @@  static struct vm_special_mapping aarch32_vdso_maps[] = {
 		.name	= "[vectors]", /* ABI */
 		.pages	= &aarch32_vectors_page,
 	},
-#ifdef CONFIG_COMPAT_VDSO
+	[AA32_MAP_SIGPAGE] = {
+		.name	= "[sigpage]", /* ABI */
+		.pages	= &aarch32_sig_page,
+	},
 	[AA32_MAP_VVAR] = {
 		.name = "[vvar]",
 	},
@@ -211,11 +206,6 @@  static struct vm_special_mapping aarch32_vdso_maps[] = {
 		.name = "[vdso]",
 		.mremap = aarch32_vdso_mremap,
 	},
-#endif /* CONFIG_COMPAT_VDSO */
-	[AA32_MAP_SIGPAGE] = {
-		.name	= "[sigpage]", /* ABI */
-		.pages	= &aarch32_sig_page,
-	},
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -254,25 +244,25 @@  static int aarch32_alloc_sigpage(void)
 	return 0;
 }
 
-#ifdef CONFIG_COMPAT_VDSO
 static int __aarch32_alloc_vdso_pages(void)
 {
+
+	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
+		return 0;
+
 	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
 	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
 
 	return __vdso_init(VDSO_ABI_AA32);
 }
-#endif /* CONFIG_COMPAT_VDSO */
 
 static int __init aarch32_alloc_vdso_pages(void)
 {
 	int ret;
 
-#ifdef CONFIG_COMPAT_VDSO
 	ret = __aarch32_alloc_vdso_pages();
 	if (ret)
 		return ret;
-#endif
 
 	ret = aarch32_alloc_sigpage();
 	if (ret)
@@ -341,14 +331,14 @@  int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (ret)
 		goto out;
 
-#ifdef CONFIG_COMPAT_VDSO
-	ret = __setup_additional_pages(VDSO_ABI_AA32,
-				       mm,
-				       bprm,
-				       uses_interp);
-	if (ret)
-		goto out;
-#endif /* CONFIG_COMPAT_VDSO */
+	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
+		ret = __setup_additional_pages(VDSO_ABI_AA32,
+					       mm,
+					       bprm,
+					       uses_interp);
+		if (ret)
+			goto out;
+	}
 
 	ret = aarch32_sigreturn_setup(mm);
 out:
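
One detail worth noting about the reordered aarch32_vdso_maps[] hunk above: because the array uses designated initializers, moving the [AA32_MAP_SIGPAGE] entry ahead of [AA32_MAP_VVAR] and [AA32_MAP_VDSO] changes only the source order, not the array layout; each element still lands at the index named by its enum constant. A standalone C sketch of that behaviour (hypothetical names, not kernel code):

#include <stdio.h>

enum map { MAP_A, MAP_B, MAP_C };	/* indices 0, 1, 2 */

struct mapping { const char *name; };

/* Listed out of enum order on purpose; each designated initializer
 * places its entry at the index named by the enum constant. */
static const struct mapping maps[] = {
	[MAP_C] = { .name = "c" },
	[MAP_A] = { .name = "a" },
	[MAP_B] = { .name = "b" },
};

int main(void)
{
	printf("%s %s %s\n", maps[MAP_A].name, maps[MAP_B].name, maps[MAP_C].name);
	/* prints: a b c */
	return 0;
}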