@@ -149,6 +149,13 @@ static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
return psr;
}
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+
/*
* This struct defines the way the registers are stored on the stack during an
* exception. struct user_pt_regs must form a prefix of struct pt_regs.
@@ -169,10 +176,6 @@ struct pt_regs {
u64 sdei_ttbr1;
struct frame_record_meta stackframe;
-
- /* Only valid for some EL1 exceptions. */
- u64 lockdep_hardirqs;
- u64 exit_rcu;
};
/* For correct stack alignment, pt_regs has to be a multiple of 16 bytes. */
@@ -36,29 +36,36 @@
* This is intended to match the logic in irqentry_enter(), handling the kernel
* mode transitions only.
*/
-static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
+static __always_inline irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs)
{
- regs->exit_rcu = false;
+ irqentry_state_t ret = {
+ .exit_rcu = false,
+ };
if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
lockdep_hardirqs_off(CALLER_ADDR0);
ct_irq_enter();
trace_hardirqs_off_finish();
- regs->exit_rcu = true;
- return;
+ ret.exit_rcu = true;
+ return ret;
}
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
+
+ return ret;
}
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
{
- __enter_from_kernel_mode(regs);
+ irqentry_state_t ret = __enter_from_kernel_mode(regs);
+
mte_check_tfsr_entry();
mte_disable_tco_entry(current);
+
+ return ret;
}
/*
@@ -69,12 +76,13 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
* This is intended to match the logic in irqentry_exit(), handling the kernel
* mode transitions only, and with preemption handled elsewhere.
*/
-static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
+static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
+ irqentry_state_t state)
{
lockdep_assert_irqs_disabled();
if (!regs_irqs_disabled(regs)) {
- if (regs->exit_rcu) {
+ if (state.exit_rcu) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
ct_irq_exit();
@@ -84,15 +92,16 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
trace_hardirqs_on();
} else {
- if (regs->exit_rcu)
+ if (state.exit_rcu)
ct_irq_exit();
}
}
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
+ irqentry_state_t state)
{
mte_check_tfsr_exit();
- __exit_to_kernel_mode(regs);
+ __exit_to_kernel_mode(regs, state);
}
/*
@@ -190,9 +199,11 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
* mode. Before this function is called it is not safe to call regular kernel
* code, instrumentable code, or any code which may trigger an exception.
*/
-static void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static noinstr irqentry_state_t arm64_enter_nmi(struct pt_regs *regs)
{
- regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+ irqentry_state_t irq_state;
+
+ irq_state.lockdep = lockdep_hardirqs_enabled();
__nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -201,6 +212,8 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
trace_hardirqs_off_finish();
ftrace_nmi_enter();
+
+ return irq_state;
}
/*
@@ -208,19 +221,18 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
* mode. After this function returns it is not safe to call regular kernel
* code, instrumentable code, or any code which may trigger an exception.
*/
-static void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs,
+ irqentry_state_t irq_state)
{
- bool restore = regs->lockdep_hardirqs;
-
ftrace_nmi_exit();
- if (restore) {
+ if (irq_state.lockdep) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
}
ct_nmi_exit();
lockdep_hardirq_exit();
- if (restore)
+ if (irq_state.lockdep)
lockdep_hardirqs_on(CALLER_ADDR0);
__nmi_exit();
}
@@ -230,14 +242,18 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
* kernel mode. Before this function is called it is not safe to call regular
* kernel code, instrumentable code, or any code which may trigger an exception.
*/
-static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{
- regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+ irqentry_state_t state;
+
+ state.lockdep = lockdep_hardirqs_enabled();
lockdep_hardirqs_off(CALLER_ADDR0);
ct_nmi_enter();
trace_hardirqs_off_finish();
+
+ return state;
}
/*
@@ -245,17 +261,16 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
* kernel mode. After this function returns it is not safe to call regular
* kernel code, instrumentable code, or any code which may trigger an exception.
*/
-static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
+ irqentry_state_t state)
{
- bool restore = regs->lockdep_hardirqs;
-
- if (restore) {
+ if (state.lockdep) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
}
ct_nmi_exit();
- if (restore)
+ if (state.lockdep)
lockdep_hardirqs_on(CALLER_ADDR0);
}
@@ -426,78 +441,86 @@ UNHANDLED(el1t, 64, error)
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ irqentry_state_t state;
- enter_from_kernel_mode(regs);
+ state = enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_mem_abort(far, esr, regs);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ irqentry_state_t state;
- enter_from_kernel_mode(regs);
+ state = enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
+
local_daif_inherit(regs);
do_el1_undef(regs, esr);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
+
local_daif_inherit(regs);
do_el1_bti(regs, esr);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
+
local_daif_inherit(regs);
do_el1_gcs(regs, esr);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
+
local_daif_inherit(regs);
do_el1_mops(regs, esr);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ irqentry_state_t state;
- arm64_enter_el1_dbg(regs);
+ state = arm64_enter_el1_dbg(regs);
if (!cortex_a76_erratum_1463225_debug_handler(regs))
do_debug_exception(far, esr, regs);
- arm64_exit_el1_dbg(regs);
+ arm64_exit_el1_dbg(regs, state);
}
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
+
local_daif_inherit(regs);
do_el1_fpac(regs, esr);
local_daif_mask();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
@@ -546,15 +569,16 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
static __always_inline void __el1_pnmi(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
- arm64_enter_nmi(regs);
+ irqentry_state_t state = arm64_enter_nmi(regs);
+
do_interrupt_handler(regs, handler);
- arm64_exit_nmi(regs);
+ arm64_exit_nmi(regs, state);
}
static __always_inline void __el1_irq(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
- enter_from_kernel_mode(regs);
+ irqentry_state_t state = enter_from_kernel_mode(regs);
irq_enter_rcu();
do_interrupt_handler(regs, handler);
@@ -562,7 +586,7 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
arm64_preempt_schedule_irq();
- exit_to_kernel_mode(regs);
+ exit_to_kernel_mode(regs, state);
}
static void noinstr el1_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
@@ -588,11 +612,12 @@ asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
+ irqentry_state_t state;
local_daif_restore(DAIF_ERRCTX);
- arm64_enter_nmi(regs);
+ state = arm64_enter_nmi(regs);
do_serror(regs, esr);
- arm64_exit_nmi(regs);
+ arm64_exit_nmi(regs, state);
}
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
@@ -855,12 +880,13 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
+ irqentry_state_t state;
enter_from_user_mode(regs);
local_daif_restore(DAIF_ERRCTX);
- arm64_enter_nmi(regs);
+ state = arm64_enter_nmi(regs);
do_serror(regs, esr);
- arm64_exit_nmi(regs);
+ arm64_exit_nmi(regs, state);
local_daif_restore(DAIF_PROCCTX);
exit_to_user_mode(regs);
}
@@ -968,6 +994,7 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
+ irqentry_state_t state;
unsigned long ret;
/*
@@ -992,9 +1019,9 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
else if (cpu_has_pan())
set_pstate_pan(0);
- arm64_enter_nmi(regs);
+ state = arm64_enter_nmi(regs);
ret = do_sdei_event(regs, arg);
- arm64_exit_nmi(regs);
+ arm64_exit_nmi(regs, state);
return ret;
}
These changes refactor the entry and exit routines for exceptions taken
from EL1. They store the RCU and lockdep state in a struct irqentry_state
variable on the stack rather than in fields of pt_regs, which is safe
enough in these contexts.

Before:

	struct pt_regs {
		...
		u64 lockdep_hardirqs;
		u64 exit_rcu;
	};

	enter_from_kernel_mode(regs);
	...
	exit_to_kernel_mode(regs);

After:

	typedef struct irqentry_state {
		union {
			bool exit_rcu;
			bool lockdep;
		};
	} irqentry_state_t;

	irqentry_state_t state = enter_from_kernel_mode(regs);
	...
	exit_to_kernel_mode(regs, state);

No functional changes.

Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/ptrace.h  |  11 ++-
 arch/arm64/kernel/entry-common.c | 129 +++++++++++++++++++------------
 2 files changed, 85 insertions(+), 55 deletions(-)
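
For illustration only, below is a minimal standalone C sketch of the
pattern this patch switches to: an entry helper returns the tracking
state by value and the handler passes it back to the exit helper from its
own stack frame, instead of stashing it in a per-exception structure such
as pt_regs. The mock_* names and printf calls are hypothetical stand-ins,
not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Simplified stand-in for irqentry_state_t: the two flags share
	 * storage because a given exception is tracked either as a normal
	 * EL1 entry (exit_rcu) or as an NMI/debug entry (lockdep), never
	 * both at once.
	 */
	typedef struct irqentry_state {
		union {
			bool exit_rcu;
			bool lockdep;
		};
	} irqentry_state_t;

	/* Entry helper: returns the state instead of writing it to regs. */
	static irqentry_state_t mock_enter_from_kernel_mode(void)
	{
		irqentry_state_t state = { .exit_rcu = false };

		/* Pretend the CPU was idle and we had to enter RCU here. */
		state.exit_rcu = true;
		return state;
	}

	/* Exit helper: receives the same state back from the caller. */
	static void mock_exit_to_kernel_mode(irqentry_state_t state)
	{
		if (state.exit_rcu)
			printf("undoing the RCU entry done on the way in\n");
	}

	int main(void)
	{
		/* The handler keeps the state in a local, as el1_abort() does. */
		irqentry_state_t state = mock_enter_from_kernel_mode();

		/* ... exception handling work would go here ... */

		mock_exit_to_kernel_mode(state);
		return 0;
	}

Keeping the state in an on-stack local also mirrors how the generic entry
code's irqentry_enter()/irqentry_exit() pair is used, which is the model
the arm64 helpers are intended to match.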