@@ -19,6 +19,9 @@
extern const unsigned long sys_call_table[];
+int invoke_syscall(void *table, struct pt_regs *regs, int scno, void *retp);
+int invoke_syscall_trace(void *table, struct pt_regs *regs, int scno, void *retp);
+
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
@@ -20,7 +20,8 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o reboot.o io.o \
setup.o signal.o sigreturn_codes.o \
- stacktrace.o sys_arm.o time.o traps.o
+ stacktrace.o sys_arm.o time.o traps.o \
+ syscall.o
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_traps.o := n
@@ -254,21 +254,23 @@ ENTRY(vector_swi)
#else
str scno, [tsk, #TI_ABI_SYSCALL]
#endif
- /*
- * Reload the registers that may have been corrupted on entry to
- * the syscall assembly (by tracing or context tracking.)
- */
- TRACE( ldmia sp, {r0 - r3} )
-
+ mov r1, sp @ put regs into r1
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- invoke_syscall tbl, scno, r10, __ret_fast_syscall
+ mov r0, tbl
+ /* r1 already contains regs */
+ mov r2, scno @ syscall number from r7
+ badr r3, __ret_fast_syscall
+ bl invoke_syscall
+ /* Restore regs into r1 and lr after C call */
+ badr lr, __ret_fast_syscall
add r1, sp, #S_OFF
+
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
@@ -301,7 +303,16 @@ __sys_trace:
add r0, sp, #S_OFF
bl syscall_trace_enter
mov scno, r0
- invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
+ mov r2, r0 @ scno into r2
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ mov r0, tbl
+ badr r3, __sys_trace_return
+ bl invoke_syscall_trace
+
+ /* Restore regs into r1 and lr after C call */
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ badr lr, __sys_trace_return
+
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
@@ -415,6 +426,44 @@ sys_mmap2:
b sys_mmap_pgoff
ENDPROC(sys_mmap2)
+/*
+ * This call wants:
+ * r0: syscall table
+ * r1: regs
+ * r2: syscall number
+ * r3: pointer to return function
+ */
+SYM_TYPED_FUNC_START(invoke_syscall_asm)
+#ifdef CONFIG_CPU_SPECTRE
+ csdb
+#endif
+ mov tbl, r0
+ mov scno, r2
+ mov lr, r3 @ return address
+ ldmia r1, {r0 - r3} @ reload r0-r3
+ /* Arguments 5 and 6 are (hopefully) on the stack */
+ ldr pc, [tbl, scno, lsl #2] @ call sys_* routine
+SYM_FUNC_END(invoke_syscall_asm)
+
+/*
+ * This call wants:
+ * r0: syscall table
+ * r1: regs
+ * r2: syscall number
+ * r3: pointer to return function
+ */
+SYM_TYPED_FUNC_START(invoke_syscall_trace_asm)
+#ifdef CONFIG_CPU_SPECTRE
+ csdb
+#endif
+ mov tbl, r0
+ mov scno, r2
+ mov lr, r3 @ return address
+ ldmia r1, {r0 - r6} @ reload r0-r6
+ stmia sp, {r4, r5} @ update stack arguments
+ ldr pc, [tbl, scno, lsl #2] @ call sys_* routine
+SYM_FUNC_END(invoke_syscall_trace_asm)
+
#ifdef CONFIG_OABI_COMPAT
/*
@@ -389,31 +389,6 @@ ALT_UP_B(.L1_\@)
#endif
.endm
- .macro invoke_syscall, table, nr, tmp, ret, reload=0
-#ifdef CONFIG_CPU_SPECTRE
- mov \tmp, \nr
- cmp \tmp, #NR_syscalls @ check upper syscall limit
- movcs \tmp, #0
- csdb
- badr lr, \ret @ return address
- .if \reload
- add r1, sp, #S_R0 + S_OFF @ pointer to regs
- ldmiacc r1, {r0 - r6} @ reload r0-r6
- stmiacc sp, {r4, r5} @ update stack arguments
- .endif
- ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
-#else
- cmp \nr, #NR_syscalls @ check upper syscall limit
- badr lr, \ret @ return address
- .if \reload
- add r1, sp, #S_R0 + S_OFF @ pointer to regs
- ldmiacc r1, {r0 - r6} @ reload r0-r6
- stmiacc sp, {r4, r5} @ update stack arguments
- .endif
- ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
-#endif
- .endm
-
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
new file mode 100644
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/syscalls.h>
+#include <asm/syscall.h>
+
+int invoke_syscall_asm(void *table, struct pt_regs *regs, int scno, void *retp);
+
+__visible int invoke_syscall(void *table, struct pt_regs *regs, int scno, void *retp)
+{
+ if (scno < NR_syscalls)
+ /* Doing this with return makes sure the stack gets popped */
+ return invoke_syscall_asm(table, regs, scno, retp);
+
+ return 0;
+}
+
+int invoke_syscall_trace_asm(void *table, struct pt_regs *regs, int scno, void *retp);
+
+__visible int invoke_syscall_trace(void *table, struct pt_regs *regs, int scno, void *retp)
+{
+ if (scno < NR_syscalls)
+ /* Doing this with return makes sure the stack gets popped */
+ return invoke_syscall_trace_asm(table, regs, scno, retp);
+
+ return 0;
+}
Switch the assembly that invokes syscalls over to doing this from C, with
a small assembly stub that actually jumps into the syscall.

Split the syscall invocation into two paths: one for plain invocation and
one for tracing (previously known as "reload", since it reloaded the
registers from regs). We rename this path with the infix "trace", as that
code will become trace-specific as we move code over to C.

Some registers such as r1 and lr get clobbered during the C calls and need
to be restored when we return.

Right now the part in C doesn't do much more than check that the syscall
number is valid (a test previously done with cmp \nr, #NR_syscalls inside
the invoke_syscall macro), but we will gradually factor over more assembly
to C that can then be switched to the generic entry code, which is the
point of the exercise.

Tested with a full system boot and by running some command line tools
under strace to make sure the tracing path still works.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 arch/arm/include/asm/syscall.h |  3 ++
 arch/arm/kernel/Makefile       |  3 +-
 arch/arm/kernel/entry-common.S | 65 ++++++++++++++++++++++++++++++++++++------
 arch/arm/kernel/entry-header.S | 25 ----------------
 arch/arm/kernel/syscall.c      | 26 +++++++++++++++++
 5 files changed, 88 insertions(+), 34 deletions(-)
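
For readers less familiar with the entry code, a rough userspace model of
the new split dispatch follows. It is an illustration only, under
simplifying assumptions: the mock_* names and the two-argument syscall
signature are invented, and the real wrappers tail-call through the
invoke_syscall_asm/invoke_syscall_trace_asm stubs above so that the
argument registers can be reloaded from pt_regs before jumping into the
sys_* routine.

/*
 * Illustration only, not kernel code: mock_table, mock_invoke_syscall()
 * and mock_invoke_syscall_trace() are invented names modelling the two
 * C wrappers; the real ones dispatch through assembly stubs instead of
 * a C function pointer so the argument registers can be reloaded.
 */
#include <stdio.h>

#define MOCK_NR_SYSCALLS 2

typedef long (*syscall_fn)(long, long);

static long mock_sys_add(long a, long b) { return a + b; }
static long mock_sys_mul(long a, long b) { return a * b; }

static const syscall_fn mock_table[MOCK_NR_SYSCALLS] = {
	mock_sys_add,
	mock_sys_mul,
};

/* Plain path: range-check the syscall number, then dispatch. */
static long mock_invoke_syscall(const syscall_fn *table, int scno,
				long a0, long a1)
{
	if (scno >= 0 && scno < MOCK_NR_SYSCALLS)
		return table[scno](a0, a1);

	return 0;
}

/*
 * Trace path: same check; the real version additionally reloads the
 * argument registers from pt_regs, since a tracer may have changed them.
 */
static long mock_invoke_syscall_trace(const syscall_fn *table, int scno,
				      long a0, long a1)
{
	if (scno >= 0 && scno < MOCK_NR_SYSCALLS)
		return table[scno](a0, a1);

	return 0;
}

int main(void)
{
	printf("%ld\n", mock_invoke_syscall(mock_table, 1, 6, 7));        /* 42 */
	printf("%ld\n", mock_invoke_syscall_trace(mock_table, 99, 6, 7)); /* 0: out of range */
	return 0;
}

The point of the split is that the plain and trace wrappers can now grow
apart in C (trace-specific work, and eventually the generic entry code)
while the assembly stubs stay minimal.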