diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -86,6 +86,6 @@ static inline int syscall_get_arch(struct task_struct *task)
 }
 
 int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags);
-void syscall_trace_exit(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs, unsigned long flags);
 
 #endif /* __ASM_SYSCALL_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2384,10 +2384,8 @@ int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags)
 	return regs->syscallno;
 }
 
-void syscall_trace_exit(struct pt_regs *regs)
+void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
 {
-	unsigned long flags = read_thread_flags();
-
 	audit_syscall_exit(regs);
 
 	if (flags & _TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -143,7 +143,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	}
 
 trace_exit:
-	syscall_trace_exit(regs);
+	flags = read_thread_flags();
+	syscall_trace_exit(regs, flags);
 }
 
 void do_el0_svc(struct pt_regs *regs)
The generic entry syscall_exit_work() uses the syscall work flags passed
in by its caller. In preparation for moving arm64 over to the generic
entry code, refactor syscall_trace_exit() to take the thread flags from
its caller as well.

No functional changes.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/syscall.h | 2 +-
 arch/arm64/kernel/ptrace.c       | 4 +---
 arch/arm64/kernel/syscall.c      | 3 ++-
 3 files changed, 4 insertions(+), 5 deletions(-)
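
For context, the calling convention this change converges toward looks
roughly like the sketch below. This is a simplified, from-memory excerpt
of the generic entry code in kernel/entry/common.c, not part of this
patch; names such as SYSCALL_WORK_EXIT and the syscall_work field may
differ between kernel versions:

	/* generic entry: the caller samples the work bits and passes them in */
	static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
	{
		audit_syscall_exit(regs);

		if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
			trace_sys_exit(regs, syscall_get_return_value(current, regs));

		/* single-step and ptrace reporting elided */
	}

	/* exit path */
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);

Having el0_svc_common() read the thread flags and pass them down mirrors
this shape, so the arm64 exit path can later be switched over to the
generic helper without changing syscall_trace_exit()'s interface again.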