@@ -30,34 +30,8 @@ saved_pc .req lr
.section .entry.text,"ax",%progbits
.align 5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
- IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
- * This is the fast syscall return path. We do as little as possible here,
- * such as avoiding writing r0 to the stack. We only use this path if we
- * have tracing, context tracking and rseq debug disabled - the overheads
- * from those features make this path too inefficient.
- */
-ret_fast_syscall:
- UNWIND(.fnstart )
- UNWIND(.cantunwind )
- disable_irq_notrace @ disable interrupts
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- movs r1, r1, lsl #16
- bne fast_work_pending
-
- restore_user_regs fast = 0, offset = S_OFF
- UNWIND(.fnend )
-ENDPROC(ret_fast_syscall)
-
- /* Ok, we need to do extra processing, enter the slow path. */
-fast_work_pending:
- add sp, sp, #(S_R0 + S_OFF)
- /* fall through to work_pending */
-#else
-/*
- * The "replacement" ret_fast_syscall for when tracing, context tracking,
- * or rseq debug is enabled. As we will need to call out to some C functions,
+ * As we will need to call out to some C functions,
* we save r0 first to avoid needing to save registers around each C function
* call.
*/
@@ -75,7 +49,6 @@ ret_fast_syscall:
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
-#endif
tst r1, #_TIF_SYSCALL_WORK
beq slow_work_pending
When not using any irqflags tracing, context tracking or rseq debugging,
the ARM kernel uses a "superfast" ret_fast_syscall path consisting of only
a few assembly instructions. With generic entry, such local optimizations
are not really possible: we need to call into C unconditionally and take
the (hopefully small) overhead. With generic entry in C, most of the
tracing (etc) calls are replaced by stubs and optimized out by the
compiler when the corresponding features are disabled.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 arch/arm/kernel/entry-common.S | 29 +----------------------------
 1 file changed, 1 insertion(+), 28 deletions(-)
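
[Illustrative note, not part of the patch] The commit message relies on the
compiler eliminating disabled hooks from the generic exit path. Below is a
minimal standalone C sketch of that idea; the names (CONFIG_TRACE_IRQFLAGS,
trace_hardirqs_on, exit_to_user_mode) mirror kernel conventions but this is
not the kernel's actual code.

	/*
	 * Sketch: when a debug/tracing feature is disabled at build time,
	 * its hook is an empty static inline, so the unconditional call in
	 * the generic exit path compiles to nothing.
	 */
	#include <stdio.h>

	/* #define CONFIG_TRACE_IRQFLAGS 1 */	/* toggle to enable the hook */

	#ifdef CONFIG_TRACE_IRQFLAGS
	static inline void trace_hardirqs_on(void)
	{
		puts("irqflags tracing hook ran");
	}
	#else
	static inline void trace_hardirqs_on(void) { }	/* stub: optimized away */
	#endif

	static inline void exit_to_user_mode(void)
	{
		/* Called unconditionally; costs nothing when the stub is empty. */
		trace_hardirqs_on();
	}

	int main(void)
	{
		exit_to_user_mode();
		return 0;
	}

With the config macro commented out, the call site generates no code, which
is why the hand-written assembly fast path is no longer worth keeping.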