--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -67,6 +67,7 @@ struct thread_info {
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
__u32 abi_syscall; /* ABI type and syscall nr */
+ __u32 sp_syscall; /* SP when entering syscall */
unsigned long tp_value[2]; /* TLS registers */
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -49,6 +49,7 @@ int main(void)
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall));
+ DEFINE(TI_SP_SYSCALL, offsetof(struct thread_info, sp_syscall));
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
#ifdef CONFIG_VFP
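(The asm-offsets.c entry is what lets the assembly in entry-common.S refer to the new field by name: each DEFINE() emits the structure offset as an assembler-visible constant at build time. A rough sketch of the mechanism, paraphrased from include/linux/kbuild.h; the emitted value is whatever offsetof() evaluates to for the current configuration:

	/* The inline asm emits a "->TI_SP_SYSCALL <offset>" marker into
	 * the compiler's .s output; a kbuild script rewrites such markers
	 * into "#define TI_SP_SYSCALL <offset>" in the generated
	 * asm-offsets.h that entry-common.S includes. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
)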
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -232,6 +232,8 @@ ENTRY(vector_swi)
uaccess_disable tbl
get_thread_info tsk
+ /* Save a per-task copy of SP for sigreturn */
+ str sp, [tsk, #TI_SP_SYSCALL]
adr tbl, sys_call_table @ load syscall table pointer
@@ -377,13 +379,15 @@ sys_syscall:
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
- add r0, sp, #S_OFF
+ get_thread_info tsk
+ ldr r0, [tsk, #TI_SP_SYSCALL] @ read back SP
mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
- add r0, sp, #S_OFF
+ get_thread_info tsk
+ ldr r0, [tsk, #TI_SP_SYSCALL] @ read back SP
mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
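(Both wrappers re-derive the thread_info pointer with get_thread_info, which is cheap. In the classic layout, without THREAD_INFO_IN_TASK, thread_info sits at the bottom of the THREAD_SIZE-aligned kernel stack and can be derived by masking the stack pointer; with THREAD_INFO_IN_TASK it is read from the current task pointer instead. A minimal C-level sketch of the classic derivation; the helper name is illustrative:

	/* thread_info can be found by masking the kernel stack pointer
	 * down to the THREAD_SIZE-aligned base of the current stack. */
	static inline struct thread_info *thread_info_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
)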
We are going to rewrite the syscall handling in C, which means that the
stack used by the syscall code is no longer predictably 8 bytes (for
syscall arguments r4 and r5) but a varying number of bytes depending on
how deeply nested the C code is. However, the current code simply
assumes it can rewind the stack by adding 8 to sp when a syscall is
interrupted by a sigreturn call.

Solve this by storing the entry sp in the per-task struct thread_info
and using that in the sigreturn wrappers instead. We already have the
thread_info pointer available in the SWI entry, and sigreturn is rare
enough that retrieving a pointer to thread_info there should not affect
performance much.

Storing this per-task in thread_info makes the solution SMP-robust.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 arch/arm/include/asm/thread_info.h | 1 +
 arch/arm/kernel/asm-offsets.c      | 1 +
 arch/arm/kernel/entry-common.S     | 8 ++++++--
 3 files changed, 8 insertions(+), 2 deletions(-)
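To make the new flow concrete, here is a hedged C-level sketch of the
idea; the helper name is hypothetical, while sys_sigreturn() taking the
saved register frame matches the existing kernel interface:

	/* At syscall entry, vector_swi stores sp into the per-task
	 * thread_info.  The sigreturn wrappers then recover the register
	 * frame from that saved value, rather than assuming the frame
	 * sits at a fixed 8-byte offset from the current sp. */
	static struct pt_regs *syscall_entry_regs(void)
	{
		return (struct pt_regs *)current_thread_info()->sp_syscall;
	}

Because the value lives in thread_info rather than in a global, each
task keeps its own copy regardless of which CPU it runs on, which is
what makes the approach SMP-robust.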