[v2,3/3] arm64: remove the rest of asm-uaccess.h

Message ID 20191122022406.590141-4-pasha.tatashin@soleen.com (mailing list archive)
State New, archived
Series Use C inlines for uaccess

Commit Message

Pasha Tatashin Nov. 22, 2019, 2:24 a.m. UTC
__uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
two macros defined in asm-uaccess.h.

Replace them with C wrappers and call C functions from
kernel_entry and kernel_exit.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 38 ----------------------------
 arch/arm64/kernel/entry.S            |  6 ++---
 arch/arm64/lib/clear_user.S          |  2 +-
 arch/arm64/lib/copy_from_user.S      |  2 +-
 arch/arm64/lib/copy_in_user.S        |  2 +-
 arch/arm64/lib/copy_to_user.S        |  2 +-
 arch/arm64/mm/cache.S                |  1 -
 arch/arm64/mm/context.c              | 12 +++++++++
 8 files changed, 19 insertions(+), 46 deletions(-)
 delete mode 100644 arch/arm64/include/asm/asm-uaccess.h
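
For context, the new __uaccess_ttbr0_enable_c/__uaccess_ttbr0_disable_c
wrappers added in context.c simply call the existing C inlines from
<asm/uaccess.h>. As a rough sketch of the disable path (paraphrased from
the kernel sources of that era, not quoted verbatim; comments and local
variable names are approximate):

	static inline void __uaccess_ttbr0_disable(void)
	{
		unsigned long flags, ttbr;

		local_irq_save(flags);
		ttbr = read_sysreg(ttbr1_el1);
		ttbr &= ~TTBR_ASID_MASK;
		/* reserved_ttbr0 sits just before swapper_pg_dir */
		write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
		isb();
		/* switch ttbr1_el1 to the reserved ASID */
		write_sysreg(ttbr, ttbr1_el1);
		isb();
		local_irq_restore(flags);
	}

This mirrors the __uaccess_ttbr0_disable assembly macro deleted below,
with local_irq_save()/local_irq_restore() standing in for the
non-preemptibility that the entry assembly gets for free.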

Comments

Mark Rutland Nov. 27, 2019, 3:11 p.m. UTC | #1
On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> two macros defined in asm-uaccess.h.
> 
> Replace them with C wrappers and call C functions from
> kernel_entry and kernel_exit.

For now, please leave those as-is.

I don't think we want to have out-of-line C wrappers in the middle of
the entry assembly where we don't have a complete kernel environment.
The use in entry code can also assume non-preemptibility, while the C
functions have to explicitly disable that.

We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
and maybe fold the macros into entry.S if it's not too crowded.

Thanks,
Mark.

> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> ---
>  arch/arm64/include/asm/asm-uaccess.h | 38 ----------------------------
>  arch/arm64/kernel/entry.S            |  6 ++---
>  arch/arm64/lib/clear_user.S          |  2 +-
>  arch/arm64/lib/copy_from_user.S      |  2 +-
>  arch/arm64/lib/copy_in_user.S        |  2 +-
>  arch/arm64/lib/copy_to_user.S        |  2 +-
>  arch/arm64/mm/cache.S                |  1 -
>  arch/arm64/mm/context.c              | 12 +++++++++
>  8 files changed, 19 insertions(+), 46 deletions(-)
>  delete mode 100644 arch/arm64/include/asm/asm-uaccess.h
> 
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> deleted file mode 100644
> index 8f763e5b41b1..000000000000
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ /dev/null
> @@ -1,38 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> -#ifndef __ASM_ASM_UACCESS_H
> -#define __ASM_ASM_UACCESS_H
> -
> -#include <asm/alternative.h>
> -#include <asm/kernel-pgtable.h>
> -#include <asm/mmu.h>
> -#include <asm/sysreg.h>
> -#include <asm/assembler.h>
> -
> -/*
> - * User access enabling/disabling macros.
> - */
> -#ifdef CONFIG_ARM64_SW_TTBR0_PAN
> -	.macro	__uaccess_ttbr0_disable, tmp1
> -	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
> -	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
> -	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
> -	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
> -	isb
> -	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
> -	msr	ttbr1_el1, \tmp1		// set reserved ASID
> -	isb
> -	.endm
> -
> -	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
> -	get_current_task \tmp1
> -	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
> -	mrs	\tmp2, ttbr1_el1
> -	extr    \tmp2, \tmp2, \tmp1, #48
> -	ror     \tmp2, \tmp2, #16
> -	msr	ttbr1_el1, \tmp2		// set the active ASID
> -	isb
> -	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
> -	isb
> -	.endm
> -#endif
> -#endif
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 583f71abbe98..c7b571e6d0f2 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -22,8 +22,8 @@
>  #include <asm/mmu.h>
>  #include <asm/processor.h>
>  #include <asm/ptrace.h>
> +#include <asm/kernel-pgtable.h>
>  #include <asm/thread_info.h>
> -#include <asm/asm-uaccess.h>
>  #include <asm/unistd.h>
>  
>  /*
> @@ -219,7 +219,7 @@ alternative_else_nop_endif
>  	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
>  	.endif
>  
> -	__uaccess_ttbr0_disable x21
> +	bl __uaccess_ttbr0_disable_c
>  1:
>  #endif
>  
> @@ -293,7 +293,7 @@ alternative_else_nop_endif
>  	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
>  	.endif
>  
> -	__uaccess_ttbr0_enable x0, x1
> +	bl	__uaccess_ttbr0_enable_c
>  
>  	.if	\el == 0
>  	/*
> diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
> index aeafc03e961a..b0b4a86a09e2 100644
> --- a/arch/arm64/lib/clear_user.S
> +++ b/arch/arm64/lib/clear_user.S
> @@ -6,7 +6,7 @@
>   */
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  
>  	.text
> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
> index ebb3c06cbb5d..142bc7505518 100644
> --- a/arch/arm64/lib/copy_from_user.S
> +++ b/arch/arm64/lib/copy_from_user.S
> @@ -5,7 +5,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
> index 3d8153a1ebce..04dc48ca26f7 100644
> --- a/arch/arm64/lib/copy_in_user.S
> +++ b/arch/arm64/lib/copy_in_user.S
> @@ -7,7 +7,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
> index 357eae2c18eb..8f3218ae88ab 100644
> --- a/arch/arm64/lib/copy_to_user.S
> +++ b/arch/arm64/lib/copy_to_user.S
> @@ -5,7 +5,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index 408d317a47d2..7940d6ef5da5 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -12,7 +12,6 @@
>  #include <asm/assembler.h>
>  #include <asm/cpufeature.h>
>  #include <asm/alternative.h>
> -#include <asm/asm-uaccess.h>
>  
>  /*
>   *	__arch_flush_icache_range(start,end)
> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
> index b5e329fde2dd..4fc32c504dea 100644
> --- a/arch/arm64/mm/context.c
> +++ b/arch/arm64/mm/context.c
> @@ -237,6 +237,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
>  		cpu_switch_mm(mm->pgd, mm);
>  }
>  
> +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
> +asmlinkage void __uaccess_ttbr0_enable_c(void)
> +{
> +	__uaccess_ttbr0_enable();
> +}
> +
> +asmlinkage void __uaccess_ttbr0_disable_c(void)
> +{
> +	__uaccess_ttbr0_disable();
> +}
> +#endif
> +
>  /* Errata workaround post TTBRx_EL1 update. */
>  asmlinkage void post_ttbr_update_workaround(void)
>  {
> -- 
> 2.24.0
>
Pasha Tatashin Nov. 27, 2019, 3:31 p.m. UTC | #2
On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> > two macros defined in asm-uaccess.h.
> >
> > Replace them with C wrappers and call C functions from
> > kernel_entry and kernel_exit.
>
> For now, please leave those as-is.
>
> I don't think we want to have out-of-line C wrappers in the middle of
> the entry assembly where we don't have a complete kernel environment.
> The use in entry code can also assume non-preemptibility, while the C
> functions have to explicitly disable that.

I do not understand: if a C function is called from a non-preemptible
context, it stays non-preemptible. kernel_exit may already call C
functions around the time __uaccess_ttbr0_enable is called (it may
call post_ttbr_update_workaround), and that C function does not do an
explicit preempt disable.

> We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
> and maybe fold the macros into entry.S if it's not too crowded.

I can do this as a separate patch.

Thank you,
Pasha
Mark Rutland Nov. 27, 2019, 4:03 p.m. UTC | #3
On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> >
> > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> > > two macros defined in asm-uaccess.h.
> > >
> > > Replace them with C wrappers and call C functions from
> > > kernel_entry and kernel_exit.
> >
> > For now, please leave those as-is.
> >
> > I don't think we want to have out-of-line C wrappers in the middle of
> > the entry assembly where we don't have a complete kernel environment.
> > The use in entry code can also assume non-preemptibility, while the C
> > functions have to explicitly disable that.
> 
> I do not understand: if a C function is called from a non-preemptible
> context, it stays non-preemptible. kernel_exit may already call C
> functions around the time __uaccess_ttbr0_enable is called (it may
> call post_ttbr_update_workaround), and that C function does not do an
> explicit preempt disable.

Sorry, I meant that IRQs are disabled here.

The C wrapper calls __uaccess_ttbr0_enable(), which calls
local_irq_save() and local_irq_restore(). Those are pointless in the
bowels of the entry code, and potentially expensive if IRQ prio masking
is in use.
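
For reference, __uaccess_ttbr0_enable() looks roughly like this (a
sketch, not the verbatim kernel source):

	static inline void __uaccess_ttbr0_enable(void)
	{
		unsigned long flags, ttbr0, ttbr1;

		/*
		 * IRQs are disabled so that a context switch cannot
		 * trigger an ASID roll-over and an update of 'ttbr0'
		 * between reading the variable and the MSR.
		 */
		local_irq_save(flags);
		ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

		/* restore the active ASID */
		ttbr1 = read_sysreg(ttbr1_el1);
		ttbr1 &= ~TTBR_ASID_MASK;
		ttbr1 |= ttbr0 & TTBR_ASID_MASK;
		write_sysreg(ttbr1, ttbr1_el1);
		isb();

		/* restore the user page table */
		write_sysreg(ttbr0, ttbr0_el1);
		isb();
		local_irq_restore(flags);
	}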

I'd rather not add more out-of-line C code calls here right now as I'd
prefer to factor out the logic to C in a better way.

> > We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
> > and maybe fold the macros into entry.S if it's not too crowded.
> 
> I can do this as a separate patch.

That sounds fine to me,

Thanks,
Mark.
Pasha Tatashin Nov. 27, 2019, 4:09 p.m. UTC | #4
On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > >
> > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> > > > two macros defined in asm-uaccess.h.
> > > >
> > > > Replace them with C wrappers and call C functions from
> > > > kernel_entry and kernel_exit.
> > >
> > > For now, please leave those as-is.
> > >
> > > I don't think we want to have out-of-line C wrappers in the middle of
> > > the entry assembly where we don't have a complete kernel environment.
> > > The use in entry code can also assume non-preemptibility, while the C
> > > functions have to explicitly disable that.
> >
> > I do not understand: if a C function is called from a non-preemptible
> > context, it stays non-preemptible. kernel_exit may already call C
> > functions around the time __uaccess_ttbr0_enable is called (it may
> > call post_ttbr_update_workaround), and that C function does not do an
> > explicit preempt disable.
>
> Sorry, I meant that IRQs are disabled here.
>
> The C wrapper calls __uaccess_ttbr0_enable(), which calls
> local_irq_save() and local_irq_restore(). Those are pointless in the
> bowels of the entry code, and potentially expensive if IRQ prio masking
> is in use.
>
> I'd rather not add more out-of-line C code calls here right now as I'd
> prefer to factor out the logic to C in a better way.

Ah, yes, this makes sense. I could certainly factor out C calls in a
better way, or is this something you want to work on?

Without removing these assembly macros I do not think we want to
address this suggestion from Kees Cook:
https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/

Thank you,
Pasha
Mark Rutland Nov. 27, 2019, 5:01 p.m. UTC | #5
On Wed, Nov 27, 2019 at 11:09:35AM -0500, Pavel Tatashin wrote:
> On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
> >
> > On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > > >
> > > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > > __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> > > > > two macros defined in asm-uaccess.h.
> > > > >
> > > > > Replace them with C wrappers and call C functions from
> > > > > kernel_entry and kernel_exit.
> > > >
> > > > For now, please leave those as-is.
> > > >
> > > > I don't think we want to have out-of-line C wrappers in the middle of
> > > > the entry assembly where we don't have a complete kernel environment.
> > > > The use in entry code can also assume non-preemptibility, while the C
> > > > functions have to explicitly disable that.
> > >
> > > I do not understand: if a C function is called from a non-preemptible
> > > context, it stays non-preemptible. kernel_exit may already call C
> > > functions around the time __uaccess_ttbr0_enable is called (it may
> > > call post_ttbr_update_workaround), and that C function does not do an
> > > explicit preempt disable.
> >
> > Sorry, I meant that IRQs are disabled here.
> >
> > The C wrapper calls __uaccess_ttbr0_enable(), which calls
> > local_irq_save() and local_irq_restore(). Those are pointless in the
> > bowels of the entry code, and potentially expensive if IRQ prio masking
> > is in use.
> >
> > I'd rather not add more out-of-line C code calls here right now as I'd
> > prefer to factor out the logic to C in a better way.
> 
> Ah, yes, this makes sense. I could certainly factor out C calls in a
> better way, or is this something you want to work on?

I'm hoping to do that as part of ongoing entry-deasm work, now that a
lot of the prerequisite work was merged in v5.4.

> Without removing these assembly macros I do not think we want to
> address this suggestion from Kees Cook:
> https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/

In the meantime, we could add checks around addr_limit_user_check(),
and in the context-switch path. I have some preparatory cleanup to allow
for the context-switch check, which I'll send out at -rc1. That was what
I used to detect the case you reported previously.

Thanks,
Mark.
Pasha Tatashin Nov. 27, 2019, 5:13 p.m. UTC | #6
On Wed, Nov 27, 2019 at 12:01 PM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Wed, Nov 27, 2019 at 11:09:35AM -0500, Pavel Tatashin wrote:
> > On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > >
> > > On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > > > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > > > >
> > > > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > > > __uaccess_ttbr0_disable and __uaccess_ttbr0_enable are the last
> > > > > > two macros defined in asm-uaccess.h.
> > > > > >
> > > > > > Replace them with C wrappers and call C functions from
> > > > > > kernel_entry and kernel_exit.
> > > > >
> > > > > For now, please leave those as-is.
> > > > >
> > > > > I don't think we want to have out-of-line C wrappers in the middle of
> > > > > the entry assembly where we don't have a complete kernel environment.
> > > > > The use in entry code can also assume non-preemptibility, while the C
> > > > > functions have to explicitly disable that.
> > > >
> > > > I do not understand: if a C function is called from a non-preemptible
> > > > context, it stays non-preemptible. kernel_exit may already call C
> > > > functions around the time __uaccess_ttbr0_enable is called (it may
> > > > call post_ttbr_update_workaround), and that C function does not do an
> > > > explicit preempt disable.
> > >
> > > Sorry, I meant that IRQs are disabled here.
> > >
> > > The C wrapper calls __uaccess_ttbr0_enable(), which calls
> > > local_irq_save() and local_irq_restore(). Those are pointless in the
> > > bowels of the entry code, and potentially expensive if IRQ prio masking
> > > is in use.
> > >
> > > I'd rather not add more out-of-line C code calls here right now as I'd
> > > prefer to factor out the logic to C in a better way.
> >
> > Ah, yes, this makes sense. I could certainly factor out C calls in a
> > better way, or is this something you want to work on?
>
> I'm hoping to do that as part of ongoing entry-deasm work, now that a
> lot of the prerequisite work was merged in v5.4.

OK, I will send new patches with what we agreed on, and your comments addressed.

>
> > Without removing these assembly macros I do not think we want to
> > address this suggestion from Kees Cook:
> > https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/
>
> > In the meantime, we could add checks around addr_limit_user_check(),
> and in the context-switch path. I have some preparatory cleanup to allow
> for the context-switch check, which I'll send out at -rc1. That was what
> I used to detect the case you reported previously.

Sounds good.

Thank you,
Pasha

Patch

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
deleted file mode 100644
index 8f763e5b41b1..000000000000
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ /dev/null
@@ -1,38 +0,0 @@ 
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ASM_UACCESS_H
-#define __ASM_ASM_UACCESS_H
-
-#include <asm/alternative.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/mmu.h>
-#include <asm/sysreg.h>
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	.macro	__uaccess_ttbr0_disable, tmp1
-	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
-	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
-	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
-	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
-	isb
-	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
-	msr	ttbr1_el1, \tmp1		// set reserved ASID
-	isb
-	.endm
-
-	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
-	get_current_task \tmp1
-	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
-	mrs	\tmp2, ttbr1_el1
-	extr    \tmp2, \tmp2, \tmp1, #48
-	ror     \tmp2, \tmp2, #16
-	msr	ttbr1_el1, \tmp2		// set the active ASID
-	isb
-	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
-	isb
-	.endm
-#endif
-#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 583f71abbe98..c7b571e6d0f2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -22,8 +22,8 @@ 
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -219,7 +219,7 @@  alternative_else_nop_endif
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
 	.endif
 
-	__uaccess_ttbr0_disable x21
+	bl __uaccess_ttbr0_disable_c
 1:
 #endif
 
@@ -293,7 +293,7 @@  alternative_else_nop_endif
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0, x1
+	bl	__uaccess_ttbr0_enable_c
 
 	.if	\el == 0
 	/*
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index aeafc03e961a..b0b4a86a09e2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -6,7 +6,7 @@ 
  */
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 
 	.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index ebb3c06cbb5d..142bc7505518 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -5,7 +5,7 @@ 
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 3d8153a1ebce..04dc48ca26f7 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -7,7 +7,7 @@ 
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 357eae2c18eb..8f3218ae88ab 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -5,7 +5,7 @@ 
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 408d317a47d2..7940d6ef5da5 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -12,7 +12,6 @@ 
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
-#include <asm/asm-uaccess.h>
 
 /*
  *	__arch_flush_icache_range(start,end)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..4fc32c504dea 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -237,6 +237,18 @@  void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+asmlinkage void __uaccess_ttbr0_enable_c(void)
+{
+	__uaccess_ttbr0_enable();
+}
+
+asmlinkage void __uaccess_ttbr0_disable_c(void)
+{
+	__uaccess_ttbr0_disable();
+}
+#endif
+
 /* Errata workaround post TTBRx_EL1 update. */
 asmlinkage void post_ttbr_update_workaround(void)
 {