Message ID: 20210107172908.42686-4-vincenzo.frascino@arm.com
State: New, archived
Series: arm64: ARMv8.5-A: MTE: Add async mode support
On Thu, Jan 07, 2021 at 05:29:07PM +0000, Vincenzo Frascino wrote:
> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> index d02aff9f493d..a60d3718baae 100644
> --- a/arch/arm64/include/asm/mte.h
> +++ b/arch/arm64/include/asm/mte.h
> @@ -39,6 +39,7 @@ void mte_free_tag_storage(char *storage);
>  /* track which pages have valid allocation tags */
>  #define PG_mte_tagged	PG_arch_2
> 
> +void mte_check_tfsr_el1(void);
>  void mte_sync_tags(pte_t *ptep, pte_t pte);
>  void mte_copy_page_tags(void *kto, const void *kfrom);
>  void flush_mte_state(void);
> @@ -56,6 +57,9 @@ void mte_assign_mem_tag_range(void *addr, size_t size);
>  /* unused if !CONFIG_ARM64_MTE, silence the compiler */
>  #define PG_mte_tagged	0
> 
> +static inline void mte_check_tfsr_el1(void)
> +{
> +}

I think we should enable this dummy function when !CONFIG_KASAN_HW_TAGS.
It saves us an unnecessary function call in a few places.

>  static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
>  {
>  }
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 5346953e4382..74b020ce72d7 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
>  	lockdep_hardirqs_off(CALLER_ADDR0);
>  	rcu_irq_enter_check_tick();
>  	trace_hardirqs_off_finish();
> +
> +	mte_check_tfsr_el1();
>  }
> 
>  /*
> @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
>  {
>  	lockdep_assert_irqs_disabled();
> 
> +	mte_check_tfsr_el1();
> +
>  	if (interrupts_enabled(regs)) {
>  		if (regs->exit_rcu) {
>  			trace_hardirqs_on_prepare();
> @@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
> 
>  asmlinkage void noinstr exit_to_user_mode(void)
>  {
> +	mte_check_tfsr_el1();

While for kernel entry the asynchronous faults are sync'ed automatically
with TFSR_EL1, we don't have this for exit, so we'd need an explicit
DSB. But rather than placing it here, it's better if we add a bool sync
argument to mte_check_tfsr_el1() which issues a dsb() before checking
the register. I think that's the only place where such argument would be
true (for now).

> +
>  	trace_hardirqs_on_prepare();
>  	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
>  	user_enter_irqoff();
> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index 5d992e16b420..26030f0b79fe 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -185,6 +185,34 @@ void mte_enable_kernel(enum kasan_arg_mode mode)
>  	isb();
>  }
> 
> +void mte_check_tfsr_el1(void)
> +{
> +	u64 tfsr_el1;
> +
> +	if (!IS_ENABLED(CONFIG_KASAN_HW_TAGS))
> +		return;

If we define the static inline when !CONFIG_KASAN_HW_TAGS, we could add
the #ifdef here around the whole function.

> +	if (!system_supports_mte())
> +		return;
> +
> +	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
> +
> +	/*
> +	 * The kernel should never hit the condition TF0 == 1
> +	 * at this point because for the futex code we set
> +	 * PSTATE.TCO.
> +	 */
> +	WARN_ON(tfsr_el1 & SYS_TFSR_EL1_TF0);
> +
> +	if (tfsr_el1 & SYS_TFSR_EL1_TF1) {
> +		write_sysreg_s(0, SYS_TFSR_EL1);
> +		isb();
> +
> +		pr_err("MTE: Asynchronous tag exception detected!");
> +	}
> +}
> +NOKPROBE_SYMBOL(mte_check_tfsr_el1);

Do we need this to be NOKPROBE_SYMBOL? It's not that low level.

> +
>  static void update_sctlr_el1_tcf0(u64 tcf0)
>  {
>  	/* ISB required for the kernel uaccess routines */
> @@ -250,6 +278,15 @@ void mte_thread_switch(struct task_struct *next)
>  	/* avoid expensive SCTLR_EL1 accesses if no change */
>  	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
>  		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
> +
> +	/*
> +	 * Check if an async tag exception occurred at EL1.
> +	 *
> +	 * Note: On the context switch patch we rely on the dsb() present

s/patch/path/

> +	 * in __switch_to() to guaranty that the indirect writes to TFSR_EL1

s/guaranty/guarantee/ (well, still valid though I think rarely used).

> +	 * are synchronized before this point.
> +	 */
> +	mte_check_tfsr_el1();
>  }
> 
>  void mte_suspend_exit(void)
> --
> 2.30.0
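A minimal sketch of the header arrangement suggested above, keying both the declaration and the dummy off CONFIG_KASAN_HW_TAGS; this is illustrative only and not the actual v3 patch:

/* Sketch only: how asm/mte.h could gate the declaration, per the
 * suggestion above. */
#ifdef CONFIG_KASAN_HW_TAGS
/* Real check, implemented in arch/arm64/kernel/mte.c. */
void mte_check_tfsr_el1(void);
#else
/* Compiles away entirely, so callers pay no function-call cost. */
static inline void mte_check_tfsr_el1(void)
{
}
#endif

With this layout, kernels built without KASAN_HW_TAGS see an empty inline and the calls added to the entry/exit paths disappear at compile time.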
On 1/13/21 6:11 PM, Catalin Marinas wrote:
> On Thu, Jan 07, 2021 at 05:29:07PM +0000, Vincenzo Frascino wrote:
>> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
>> index d02aff9f493d..a60d3718baae 100644
>> --- a/arch/arm64/include/asm/mte.h
>> +++ b/arch/arm64/include/asm/mte.h
>> @@ -39,6 +39,7 @@ void mte_free_tag_storage(char *storage);
>>  /* track which pages have valid allocation tags */
>>  #define PG_mte_tagged	PG_arch_2
>> 
>> +void mte_check_tfsr_el1(void);
>>  void mte_sync_tags(pte_t *ptep, pte_t pte);
>>  void mte_copy_page_tags(void *kto, const void *kfrom);
>>  void flush_mte_state(void);
>> @@ -56,6 +57,9 @@ void mte_assign_mem_tag_range(void *addr, size_t size);
>>  /* unused if !CONFIG_ARM64_MTE, silence the compiler */
>>  #define PG_mte_tagged	0
>> 
>> +static inline void mte_check_tfsr_el1(void)
>> +{
>> +}
> 
> I think we should enable this dummy function when !CONFIG_KASAN_HW_TAGS.
> It saves us an unnecessary function call in a few places.
> 
Ok, I will add it in v3.

>>  static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
>>  {
>>  }
>> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
>> index 5346953e4382..74b020ce72d7 100644
>> --- a/arch/arm64/kernel/entry-common.c
>> +++ b/arch/arm64/kernel/entry-common.c
>> @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
>>  	lockdep_hardirqs_off(CALLER_ADDR0);
>>  	rcu_irq_enter_check_tick();
>>  	trace_hardirqs_off_finish();
>> +
>> +	mte_check_tfsr_el1();
>>  }
>> 
>>  /*
>> @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
>>  {
>>  	lockdep_assert_irqs_disabled();
>> 
>> +	mte_check_tfsr_el1();
>> +
>>  	if (interrupts_enabled(regs)) {
>>  		if (regs->exit_rcu) {
>>  			trace_hardirqs_on_prepare();
>> @@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
>> 
>>  asmlinkage void noinstr exit_to_user_mode(void)
>>  {
>> +	mte_check_tfsr_el1();
> 
> While for kernel entry the asynchronous faults are sync'ed automatically
> with TFSR_EL1, we don't have this for exit, so we'd need an explicit
> DSB. But rather than placing it here, it's better if we add a bool sync
> argument to mte_check_tfsr_el1() which issues a dsb() before checking
> the register. I think that's the only place where such argument would be
> true (for now).
> 
Good point, I will add the dsb() in mte_check_tfsr_el1() but instead of a bool
parameter I will add something more explicit.

>> +
>>  	trace_hardirqs_on_prepare();
>>  	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
>>  	user_enter_irqoff();
>> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
>> index 5d992e16b420..26030f0b79fe 100644
>> --- a/arch/arm64/kernel/mte.c
>> +++ b/arch/arm64/kernel/mte.c
>> @@ -185,6 +185,34 @@ void mte_enable_kernel(enum kasan_arg_mode mode)
>>  	isb();
>>  }
>> 
>> +void mte_check_tfsr_el1(void)
>> +{
>> +	u64 tfsr_el1;
>> +
>> +	if (!IS_ENABLED(CONFIG_KASAN_HW_TAGS))
>> +		return;
> 
> If we define the static inline when !CONFIG_KASAN_HW_TAGS, we could add
> the #ifdef here around the whole function.
> 
Ok. I will add it in v3.

>> +	if (!system_supports_mte())
>> +		return;
>> +
>> +	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
>> +
>> +	/*
>> +	 * The kernel should never hit the condition TF0 == 1
>> +	 * at this point because for the futex code we set
>> +	 * PSTATE.TCO.
>> +	 */
>> +	WARN_ON(tfsr_el1 & SYS_TFSR_EL1_TF0);
>> +
>> +	if (tfsr_el1 & SYS_TFSR_EL1_TF1) {
>> +		write_sysreg_s(0, SYS_TFSR_EL1);
>> +		isb();
>> +
>> +		pr_err("MTE: Asynchronous tag exception detected!");
>> +	}
>> +}
>> +NOKPROBE_SYMBOL(mte_check_tfsr_el1);
> 
> Do we need this to be NOKPROBE_SYMBOL? It's not that low level.
> 
It is an inheritance from when I had this code called very early. I will remove
it in the next version.

>> +
>>  static void update_sctlr_el1_tcf0(u64 tcf0)
>>  {
>>  	/* ISB required for the kernel uaccess routines */
>> @@ -250,6 +278,15 @@ void mte_thread_switch(struct task_struct *next)
>>  	/* avoid expensive SCTLR_EL1 accesses if no change */
>>  	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
>>  		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
>> +
>> +	/*
>> +	 * Check if an async tag exception occurred at EL1.
>> +	 *
>> +	 * Note: On the context switch patch we rely on the dsb() present
> 
> s/patch/path/
> 
>> +	 * in __switch_to() to guaranty that the indirect writes to TFSR_EL1
> 
> s/guaranty/guarantee/ (well, still valid though I think rarely used).
> 
>> +	 * are synchronized before this point.
>> +	 */
>> +	mte_check_tfsr_el1();
>>  }
>> 
>>  void mte_suspend_exit(void)
>> --
>> 2.30.0
>
On Thu, Jan 14, 2021 at 10:24:25AM +0000, Vincenzo Frascino wrote:
> On 1/13/21 6:11 PM, Catalin Marinas wrote:
> > On Thu, Jan 07, 2021 at 05:29:07PM +0000, Vincenzo Frascino wrote:
> >>  static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
> >>  {
> >>  }
> >> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> >> index 5346953e4382..74b020ce72d7 100644
> >> --- a/arch/arm64/kernel/entry-common.c
> >> +++ b/arch/arm64/kernel/entry-common.c
> >> @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
> >>  	lockdep_hardirqs_off(CALLER_ADDR0);
> >>  	rcu_irq_enter_check_tick();
> >>  	trace_hardirqs_off_finish();
> >> +
> >> +	mte_check_tfsr_el1();
> >>  }
> >> 
> >>  /*
> >> @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
> >>  {
> >>  	lockdep_assert_irqs_disabled();
> >> 
> >> +	mte_check_tfsr_el1();
> >> +
> >>  	if (interrupts_enabled(regs)) {
> >>  		if (regs->exit_rcu) {
> >>  			trace_hardirqs_on_prepare();
> >> @@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
> >> 
> >>  asmlinkage void noinstr exit_to_user_mode(void)
> >>  {
> >> +	mte_check_tfsr_el1();
> > 
> > While for kernel entry the asynchronous faults are sync'ed automatically
> > with TFSR_EL1, we don't have this for exit, so we'd need an explicit
> > DSB. But rather than placing it here, it's better if we add a bool sync
> > argument to mte_check_tfsr_el1() which issues a dsb() before checking
> > the register. I think that's the only place where such argument would be
> > true (for now).
> 
> Good point, I will add the dsb() in mte_check_tfsr_el1() but instead of a bool
> parameter I will add something more explicit.

Or rename the function to mte_check_tfsr_el1_no_sync() and have a static
inline mte_check_tfsr_el1() which issues a dsb() before calling the
*no_sync variant.

Adding an enum instead here is not worth it (if that's what you meant by
not using a bool).
On 1/14/21 2:25 PM, Catalin Marinas wrote:
> On Thu, Jan 14, 2021 at 10:24:25AM +0000, Vincenzo Frascino wrote:
>> On 1/13/21 6:11 PM, Catalin Marinas wrote:
>>> On Thu, Jan 07, 2021 at 05:29:07PM +0000, Vincenzo Frascino wrote:
>>>>  static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
>>>>  {
>>>>  }
>>>> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
>>>> index 5346953e4382..74b020ce72d7 100644
>>>> --- a/arch/arm64/kernel/entry-common.c
>>>> +++ b/arch/arm64/kernel/entry-common.c
>>>> @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
>>>>  	lockdep_hardirqs_off(CALLER_ADDR0);
>>>>  	rcu_irq_enter_check_tick();
>>>>  	trace_hardirqs_off_finish();
>>>> +
>>>> +	mte_check_tfsr_el1();
>>>>  }
>>>> 
>>>>  /*
>>>> @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
>>>>  {
>>>>  	lockdep_assert_irqs_disabled();
>>>> 
>>>> +	mte_check_tfsr_el1();
>>>> +
>>>>  	if (interrupts_enabled(regs)) {
>>>>  		if (regs->exit_rcu) {
>>>>  			trace_hardirqs_on_prepare();
>>>> @@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
>>>> 
>>>>  asmlinkage void noinstr exit_to_user_mode(void)
>>>>  {
>>>> +	mte_check_tfsr_el1();
>>> 
>>> While for kernel entry the asynchronous faults are sync'ed automatically
>>> with TFSR_EL1, we don't have this for exit, so we'd need an explicit
>>> DSB. But rather than placing it here, it's better if we add a bool sync
>>> argument to mte_check_tfsr_el1() which issues a dsb() before checking
>>> the register. I think that's the only place where such argument would be
>>> true (for now).
>> 
>> Good point, I will add the dsb() in mte_check_tfsr_el1() but instead of a bool
>> parameter I will add something more explicit.
> 
> Or rename the function to mte_check_tfsr_el1_no_sync() and have a static
> inline mte_check_tfsr_el1() which issues a dsb() before calling the
> *no_sync variant.
> 
> Adding an enum instead here is not worth it (if that's what you meant by
> not using a bool).
> 
I like this option more, thanks for pointing it out.
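For reference, a minimal sketch of the shape being agreed on here. The function body is condensed from the patch above; the barrier scope (dsb(nsh)) is an assumption, and this is not the code that was eventually merged:

/* Sketch of the agreed split: the _no_sync variant only reads and
 * reports, the wrapper orders the indirect TFSR_EL1 write first.
 * Config/feature checks from the patch are omitted for brevity. */
void mte_check_tfsr_el1_no_sync(void)
{
	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

	if (tfsr_el1 & SYS_TFSR_EL1_TF1) {
		write_sysreg_s(0, SYS_TFSR_EL1);
		isb();

		pr_err("MTE: Asynchronous tag exception detected!");
	}
}

/* For callers such as exit_to_user_mode(), where the asynchronous
 * fault is not already synchronized by the exception entry. */
static inline void mte_check_tfsr_el1(void)
{
	dsb(nsh);	/* assumed barrier scope */
	mte_check_tfsr_el1_no_sync();
}

Kernel entry and context switch can keep calling the *no_sync variant directly, since TFSR_EL1 is already synchronized there (by exception entry and by the dsb() in __switch_to() respectively).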
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index d02aff9f493d..a60d3718baae 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -39,6 +39,7 @@ void mte_free_tag_storage(char *storage);
 /* track which pages have valid allocation tags */
 #define PG_mte_tagged	PG_arch_2
 
+void mte_check_tfsr_el1(void);
 void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void flush_mte_state(void);
@@ -56,6 +57,9 @@ void mte_assign_mem_tag_range(void *addr, size_t size);
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
 
+static inline void mte_check_tfsr_el1(void)
+{
+}
 static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
 {
 }
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 5346953e4382..74b020ce72d7 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 	lockdep_hardirqs_off(CALLER_ADDR0);
 	rcu_irq_enter_check_tick();
 	trace_hardirqs_off_finish();
+
+	mte_check_tfsr_el1();
 }
 
 /*
@@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 {
 	lockdep_assert_irqs_disabled();
 
+	mte_check_tfsr_el1();
+
 	if (interrupts_enabled(regs)) {
 		if (regs->exit_rcu) {
 			trace_hardirqs_on_prepare();
@@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
 
 asmlinkage void noinstr exit_to_user_mode(void)
 {
+	mte_check_tfsr_el1();
+
 	trace_hardirqs_on_prepare();
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	user_enter_irqoff();
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 5d992e16b420..26030f0b79fe 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -185,6 +185,34 @@ void mte_enable_kernel(enum kasan_arg_mode mode)
 	isb();
 }
 
+void mte_check_tfsr_el1(void)
+{
+	u64 tfsr_el1;
+
+	if (!IS_ENABLED(CONFIG_KASAN_HW_TAGS))
+		return;
+
+	if (!system_supports_mte())
+		return;
+
+	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+	/*
+	 * The kernel should never hit the condition TF0 == 1
+	 * at this point because for the futex code we set
+	 * PSTATE.TCO.
+	 */
+	WARN_ON(tfsr_el1 & SYS_TFSR_EL1_TF0);
+
+	if (tfsr_el1 & SYS_TFSR_EL1_TF1) {
+		write_sysreg_s(0, SYS_TFSR_EL1);
+		isb();
+
+		pr_err("MTE: Asynchronous tag exception detected!");
+	}
+}
+NOKPROBE_SYMBOL(mte_check_tfsr_el1);
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
@@ -250,6 +278,15 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+
+	/*
+	 * Check if an async tag exception occurred at EL1.
+	 *
+	 * Note: On the context switch patch we rely on the dsb() present
+	 * in __switch_to() to guaranty that the indirect writes to TFSR_EL1
+	 * are synchronized before this point.
+	 */
+	mte_check_tfsr_el1();
 }
 
 void mte_suspend_exit(void)
MTE provides a mode that asynchronously updates the TFSR_EL1 register
when a tag check exception is detected.

To take advantage of this mode the kernel has to verify the status of
the register at:
  1. Context switching
  2. Return to user/EL0 (not required on entry from EL0 since the
     kernel did not run)
  3. Kernel entry from EL1
  4. Kernel exit to EL1

If the register is non-zero a trace is reported.

Add the required features for EL1 detection and reporting.

Note: the ITFSB bit is set in the SCTLR_EL1 register, hence it guarantees
that the indirect writes to TFSR_EL1 are synchronized at exception entry
to EL1. On the context switch path the synchronization is guaranteed by
the dsb() in __switch_to().

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 arch/arm64/include/asm/mte.h     |  4 ++++
 arch/arm64/kernel/entry-common.c |  6 ++++++
 arch/arm64/kernel/mte.c          | 37 ++++++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+)