@@ -40,7 +40,7 @@ asmlinkage void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl
asmlinkage void do_undefinstr(struct pt_regs *regs);
asmlinkage void handle_fiq_as_nmi(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason);
-asmlinkage int arm_syscall(int no, struct pt_regs *regs);
+int arm_syscall(int no, struct pt_regs *regs);
asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs);
asmlinkage void __div0(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
@@ -264,18 +264,10 @@ ENTRY(vector_swi)
mov r0, tbl
/* r1 already contains regs */
mov r2, scno @ syscall number from r7
- badr r3, __ret_fast_syscall
- bl invoke_syscall
-
- /* Restore regs into r1 and lr after C call */
+ /* We always return to __ret_fast_syscall; also pass it as the retp argument */
badr lr, __ret_fast_syscall
- add r1, sp, #S_OFF
-
-2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
- eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
- bcs arm_syscall
- mov why, #0 @ no longer a real syscall
- b sys_ni_syscall @ not private func
+ mov r3, lr @ retp argument = __ret_fast_syscall
+ b invoke_syscall @ tail call, returns via lr
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
/*
@@ -308,13 +300,8 @@ __sys_trace:
mov r0, tbl
badr r3, __sys_trace_return
bl invoke_syscall_trace
-
- /* Restore regs into r1 and lr after C call */
- add r1, sp, #S_R0 + S_OFF @ pointer to regs
- badr lr, __sys_trace_return
-
- cmp scno, #-1 @ skip the syscall?
- bne 2b
+ cmp r0, #-1 @ syscall skipped by the tracer?
+ bne __sys_trace_return
add sp, sp, #S_OFF @ restore stack
__sys_trace_return_nosave:
@@ -11,16 +11,25 @@ __visible int invoke_syscall(void *table, struct pt_regs *regs, int scno, void *
if (scno < NR_syscalls)
/* Doing this with return makes sure the stack gets pop:ed */
return invoke_syscall_asm(table, regs, scno, retp);
- return 0;
+ if (scno >= __ARM_NR_BASE)
+ return arm_syscall(scno, regs);
+
+ return sys_ni_syscall();
}
int invoke_syscall_trace_asm(void *table, struct pt_regs *regs, int scno, void *retp);
__visible int invoke_syscall_trace(void *table, struct pt_regs *regs, int scno, void *retp)
{
+ if (scno == -1)
+ return -1;
+
if (scno < NR_syscalls)
/* Doing this with return makes sure the stack gets pop:ed */
return invoke_syscall_trace_asm(table, regs, scno, retp);
- return 0;
+ if (scno >= __ARM_NR_BASE)
+ return arm_syscall(scno, regs);
+
+ return sys_ni_syscall();
}
@@ -606,7 +606,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
* 0x9f0000 - 0x9fffff are some more esoteric system calls
*/
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
-asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+int arm_syscall(int no, struct pt_regs *regs)
{
if ((no >> 16) != (__ARM_NR_BASE>> 16))
return bad_syscall(no, regs);
The normal and trace entry code calls out to arm_syscall() and
sys_ni_syscall() from assembly, but these calls can be moved over to
the new C implementation.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 arch/arm/include/asm/traps.h   |  2 +-
 arch/arm/kernel/entry-common.S | 23 +++++------------------
 arch/arm/kernel/syscall.c      | 13 +++++++++++--
 arch/arm/kernel/traps.c        |  2 +-
 4 files changed, 18 insertions(+), 22 deletions(-)
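For reference, the dispatch order the C helpers implement after this change
(syscall table first, then the private ARM calls at and above __ARM_NR_BASE,
then sys_ni_syscall()) can be modelled in a few lines of plain C. The sketch
below is only an illustration of that control flow, not kernel code: the
numeric values of NR_syscalls and __ARM_NR_BASE, and the table_syscall()/
dispatch() helper names, are stand-ins chosen for the example.

/*
 * Standalone sketch (not kernel code) of the dispatch order that
 * invoke_syscall() implements after this patch. Constants and
 * handler bodies are illustrative stand-ins.
 */
#include <stdio.h>

#define NR_syscalls    451       /* stand-in table size */
#define __ARM_NR_BASE  0x0f0000  /* stand-in base of the private ARM calls */

static int table_syscall(int scno)  { (void)scno; return 0; }   /* models invoke_syscall_asm() */
static int arm_syscall(int scno)    { (void)scno; return 0; }   /* models arm_syscall() in traps.c */
static int sys_ni_syscall(void)     { return -38; }             /* -ENOSYS, like the kernel stub */

/* Same ordering as invoke_syscall(): table, then private calls, then ENOSYS */
static int dispatch(int scno)
{
	if (scno < NR_syscalls)
		return table_syscall(scno);

	if (scno >= __ARM_NR_BASE)
		return arm_syscall(scno);

	return sys_ni_syscall();
}

int main(void)
{
	printf("%d\n", dispatch(1));            /* regular table entry */
	printf("%d\n", dispatch(0x0f0002));     /* private ARM call */
	printf("%d\n", dispatch(NR_syscalls));  /* between table and base: -ENOSYS */
	return 0;
}

Note how the sys_ni_syscall() fallback is only reached for numbers between the
end of the table and __ARM_NR_BASE, mirroring the check the removed
"bcs arm_syscall" / "b sys_ni_syscall" pair used to perform at the old 2: label.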