
[V2,06/17] x86/entry/32: Remove redundant interrupt disable

Message ID 20191023123118.191230255@linutronix.de (mailing list archive)
State New, archived
Series entry: Provide generic implementation for host and guest entry/exit work

Commit Message

Thomas Gleixner Oct. 23, 2019, 12:27 p.m. UTC
Now that the trap handlers return with interrupts disabled, the
unconditional disabling of interrupts in the low-level entry code can be
removed along with the trace calls and the misnamed preempt_stop macro.
As a consequence, ret_from_exception and ret_from_intr collapse into one.

Add a debug check, guarded by CONFIG_DEBUG_ENTRY, to verify that
interrupts are disabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/entry/entry_32.S |   21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)
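
The DEBUG_ENTRY_ASSERT_IRQS_OFF macro referenced in the patch below is not
defined in the quoted hunks. A minimal sketch of what such a
CONFIG_DEBUG_ENTRY assertion can look like on 32-bit, modeled on the
existing DEBUG_ENTRY_ASSERT_IRQS_OFF macro in entry_64.S (the exact body
in the applied patch may differ):

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushl	%eax
	pushfl					# grab the current EFLAGS
	popl	%eax
	testl	$X86_EFLAGS_IF, %eax		# IF set means irqs are on
	jz	.Lok_\@
	ud2					# assert failed: irqs enabled
.Lok_\@:
	popl	%eax
#endif
.endm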

Comments

Sean Christopherson Oct. 23, 2019, 2:17 p.m. UTC | #1
On Wed, Oct 23, 2019 at 02:27:11PM +0200, Thomas Gleixner wrote:
> Now that the trap handlers return with interrupts disabled, the
> unconditional disabling of interrupts in the low-level entry code can be
> removed along with the trace calls and the misnamed preempt_stop macro.
> As a consequence, ret_from_exception and ret_from_intr collapse into one.
> 
> Add a debug check, guarded by CONFIG_DEBUG_ENTRY, to verify that
> interrupts are disabled.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---

One nit below.

Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>

>  arch/x86/entry/entry_32.S |   21 ++++++---------------
>  1 file changed, 6 insertions(+), 15 deletions(-)
> 
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -1207,7 +1198,7 @@ ENDPROC(common_spurious)
>  	TRACE_IRQS_OFF
>  	movl	%esp, %eax
>  	call	do_IRQ
> -	jmp	ret_from_intr
> +	jmp	ret_from_exception
>  ENDPROC(common_interrupt)
>  
>  #define BUILD_INTERRUPT3(name, nr, fn)			\
> @@ -1219,7 +1210,7 @@ ENTRY(name)						\
>  	TRACE_IRQS_OFF					\
>  	movl	%esp, %eax;				\
>  	call	fn;					\
> -	jmp	ret_from_intr;				\
> +	jmp	ret_from_exception;				\

This backslash is now unaligned.
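
I.e. the new, longer target wants one tab fewer so the continuation
backslashes stay in a column, roughly:

	call	fn;					\
	jmp	ret_from_exception;			\
ENDPROC(name)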

>  ENDPROC(name)
>  
>  #define BUILD_INTERRUPT(name, nr)		\
> @@ -1366,7 +1357,7 @@ ENTRY(xen_do_upcall)
>  #ifndef CONFIG_PREEMPTION
>  	call	xen_maybe_preempt_hcall
>  #endif
> -	jmp	ret_from_intr
> +	jmp	ret_from_exception
>  ENDPROC(xen_hypervisor_callback)
>  
>  /*
Alexandre Chartre Nov. 8, 2019, 10:41 a.m. UTC | #2
On 10/23/19 2:27 PM, Thomas Gleixner wrote:
> Now that the trap handlers return with interrupts disabled, the
> unconditional disabling of interrupts in the low-level entry code can be
> removed along with the trace calls and the misnamed preempt_stop macro.
> As a consequence, ret_from_exception and ret_from_intr collapse into one.
> 
> Add a debug check, guarded by CONFIG_DEBUG_ENTRY, to verify that
> interrupts are disabled.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>   arch/x86/entry/entry_32.S |   21 ++++++---------------
>   1 file changed, 6 insertions(+), 15 deletions(-)
> 

Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>

alex.

Patch

--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -63,12 +63,6 @@ 
  * enough to patch inline, increasing performance.
  */
 
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-#else
-# define preempt_stop(clobbers)
-#endif
-
 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
@@ -809,8 +803,7 @@  END(ret_from_fork)
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
 ret_from_exception:
-	preempt_stop(CLBR_ANY)
-ret_from_intr:
+	DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb	PT_CS(%esp), %al
@@ -825,8 +818,6 @@  END(ret_from_fork)
 	cmpl	$USER_RPL, %eax
 	jb	restore_all_kernel		# not returning to v8086 or userspace
 
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	TRACE_IRQS_OFF
 	movl	%esp, %eax
 	call	prepare_exit_to_usermode
 	jmp	restore_all
@@ -1084,7 +1075,7 @@  ENTRY(entry_INT80_32)
 
 restore_all_kernel:
 #ifdef CONFIG_PREEMPTION
-	DISABLE_INTERRUPTS(CLBR_ANY)
+	/* Interrupts are disabled and debug-checked */
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	.Lno_preempt
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
@@ -1189,7 +1180,7 @@  END(spurious_entries_start)
 	TRACE_IRQS_OFF
 	movl	%esp, %eax
 	call	smp_spurious_interrupt
-	jmp	ret_from_intr
+	jmp	ret_from_exception
 ENDPROC(common_spurious)
 #endif
 
@@ -1207,7 +1198,7 @@  ENDPROC(common_spurious)
 	TRACE_IRQS_OFF
 	movl	%esp, %eax
 	call	do_IRQ
-	jmp	ret_from_intr
+	jmp	ret_from_exception
 ENDPROC(common_interrupt)
 
 #define BUILD_INTERRUPT3(name, nr, fn)			\
@@ -1219,7 +1210,7 @@  ENTRY(name)						\
 	TRACE_IRQS_OFF					\
 	movl	%esp, %eax;				\
 	call	fn;					\
-	jmp	ret_from_intr;				\
+	jmp	ret_from_exception;				\
 ENDPROC(name)
 
 #define BUILD_INTERRUPT(name, nr)		\
@@ -1366,7 +1357,7 @@  ENTRY(xen_do_upcall)
 #ifndef CONFIG_PREEMPTION
 	call	xen_maybe_preempt_hcall
 #endif
-	jmp	ret_from_intr
+	jmp	ret_from_exception
 ENDPROC(xen_hypervisor_callback)
 
 /*
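
Putting the first two hunks together, the collapsed return path after
this patch reads roughly as follows. This is reconstructed from the
context lines above; code not visible in the hunks is elided with "...":

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	...
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all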