As in the generic entry code, rseq_syscall() runs before
report_syscall_exit() and terminates the process if the syscall was
issued within a restartable sequence. In preparation for moving arm64
over to the generic entry code, refactor el0_svc_common() as below:

- Extract a syscall_exit_to_user_mode_prepare() helper to replace the
  combination of read_thread_flags() and syscall_trace_exit(), and move
  the syscall exit check logic into it.

- Move rseq_syscall() ahead, so the CONFIG_DEBUG_RSEQ check is not
  needed.

- Move the has_syscall_work() helper into asm/syscall.h so it can be
  reused by ptrace.c.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/syscall.h |  7 ++++++-
 arch/arm64/kernel/ptrace.c       | 10 +++++++++-
 arch/arm64/kernel/syscall.c     | 26 +++++---------------------
 3 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -85,7 +85,12 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_AARCH64;
}
+static inline bool has_syscall_work(unsigned long flags)
+{
+ return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags);
-void syscall_trace_exit(struct pt_regs *regs, unsigned long flags);
+void syscall_exit_to_user_mode_prepare(struct pt_regs *regs);
#endif /* __ASM_SYSCALL_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2384,7 +2384,7 @@ int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags)
return regs->syscallno;
}
-void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
+static void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
{
audit_syscall_exit(regs);
@@ -2393,8 +2393,16 @@ void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
report_syscall_exit(regs);
+}
+
+void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long flags = read_thread_flags();
rseq_syscall(regs);
+
+ if (has_syscall_work(flags) || flags & _TIF_SINGLESTEP)
+ syscall_trace_exit(regs, flags);
}
/*
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -65,11 +65,6 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
choose_random_kstack_offset(get_random_u16());
}
-static inline bool has_syscall_work(unsigned long flags)
-{
- return unlikely(flags & _TIF_SYSCALL_WORK);
-}
-
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -125,26 +120,15 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (scno == NO_SYSCALL)
syscall_set_return_value(current, regs, -ENOSYS, 0);
scno = syscall_trace_enter(regs, regs->syscallno, flags);
- if (scno == NO_SYSCALL)
- goto trace_exit;
+ if (scno == NO_SYSCALL) {
+ syscall_exit_to_user_mode_prepare(regs);
+ return;
+ }
}
invoke_syscall(regs, scno, sc_nr, syscall_table);
- /*
- * The tracing status may have changed under our feet, so we have to
- * check again. However, if we were tracing entry, then we always trace
- * exit regardless, as the old entry assembly did.
- */
- if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
- flags = read_thread_flags();
- if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
- return;
- }
-
-trace_exit:
- flags = read_thread_flags();
- syscall_trace_exit(regs, flags);
+ syscall_exit_to_user_mode_prepare(regs);
}
void do_el0_svc(struct pt_regs *regs)
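For reference, here is a rough sketch of how el0_svc_common() reads with
this patch applied, reconstructed from the hunks above; the bookkeeping
lines at the top of the function are elided and only assumed here:

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	unsigned long flags = read_thread_flags();

	/* ... syscall bookkeeping at function entry elided ... */

	if (has_syscall_work(flags)) {
		if (scno == NO_SYSCALL)
			syscall_set_return_value(current, regs, -ENOSYS, 0);
		scno = syscall_trace_enter(regs, regs->syscallno, flags);
		if (scno == NO_SYSCALL) {
			/* Tracer rejected the syscall: still do exit work. */
			syscall_exit_to_user_mode_prepare(regs);
			return;
		}
	}

	invoke_syscall(regs, scno, sc_nr, syscall_table);

	/*
	 * Exit work (rseq check first, then audit and tracer reporting)
	 * now lives in one helper, which re-reads the thread flags itself
	 * because tracing status may have changed during the syscall.
	 */
	syscall_exit_to_user_mode_prepare(regs);
}

Compared with the old flow, the trace_exit label and the
CONFIG_DEBUG_RSEQ special case disappear, because rseq_syscall() is now
called unconditionally on the exit path, matching the generic entry
ordering the commit message describes.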