
[v8,33/33] KVM: x86/vmx: refactor VMX_DO_EVENT_IRQOFF to generate FRED stack frames

Message ID 20230410081438.1750-34-xin3.li@intel.com (mailing list archive)
State New, archived
Series x86: enable FRED for x86-64

Commit Message

Li, Xin3 April 10, 2023, 8:14 a.m. UTC
Compared to an IDT stack frame, a FRED stack frame has an extra 16 bytes of
information pushed at the regular stack top and an 8-byte error code _always_
pushed at the regular stack bottom. VMX_DO_EVENT_IRQOFF can therefore be
refactored to generate FRED stack frames with the event type and vector
properly set, so that IRQ/NMI can be handled with the existing approach when
FRED is enabled.

Because a FRED stack frame always contains an error code pushed by hardware,
first call a trampoline to get the return instruction address pushed onto the
regular stack. The trampoline then pushes an error code (0 for both IRQ and
NMI) and jumps to fred_entrypoint_kernel() for NMI handling, or calls
external_interrupt() for IRQ handling.

The trampoline for IRQ handling pushes the general purpose registers to form
a pt_regs structure and then uses it to call external_interrupt(). As a
result, IRQ handling no longer re-enters noinstr code.
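
For reference, the resulting frame on the regular stack looks roughly as
sketched below (offsets relative to the final error code; this is only an
illustration, the actual code is in the vmenter.S hunks of this patch):

	/*
	 * Rough layout sketch, not part of the patch:
	 *
	 *   +0x38  Reserved, must be 0
	 *   +0x30  FRED event data (0 for IRQ/NMI)
	 *   +0x28  SS + event type/vector + 64-bit mode bit
	 *   +0x20  RSP (the value saved in RBP on entry)
	 *   +0x18  RFLAGS
	 *   +0x10  CS (NMI bit set for NMI)
	 *   +0x08  RIP, pushed by the CALL into the trampoline
	 *   +0x00  Error code, 0, pushed by the trampoline
	 */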

Export fred_entrypoint_kernel() and external_interrupt() for the above changes.

Tested-by: Shan Kang <shan.kang@intel.com>
Signed-off-by: Xin Li <xin3.li@intel.com>
---

Changes since v7:
* Always call external_interrupt() for IRQ handling on x86_64, thus avoiding
  re-entering the noinstr code.
* Create a FRED stack frame when FRED is compiled-in but not enabled, which
  uses some extra stack space but simplifies the code.

Changes since v6:
* Export fred_entrypoint_kernel(), required when kvm-intel is built as a module.
* Reserve a REDZONE for CALL emulation and align RSP to a 64-byte boundary
  before pushing a new FRED stack frame.
---
 arch/x86/entry/entry_64_fred.S        |  1 +
 arch/x86/include/asm/asm-prototypes.h |  1 +
 arch/x86/include/asm/fred.h           |  1 +
 arch/x86/include/asm/traps.h          |  2 +
 arch/x86/kernel/traps.c               |  5 ++
 arch/x86/kvm/vmx/vmenter.S            | 78 +++++++++++++++++++++++++--
 arch/x86/kvm/vmx/vmx.c                | 12 +++--
 7 files changed, 93 insertions(+), 7 deletions(-)

Comments

Sean Christopherson April 10, 2023, 9:50 p.m. UTC | #1
"KVM: VMX:" for the shortlog please.

On Mon, Apr 10, 2023, Xin Li wrote:
> -.macro VMX_DO_EVENT_IRQOFF call_insn call_target
> +.macro VMX_DO_EVENT_IRQOFF call_insn call_target fred=0 nmi=0
>  	/*
>  	 * Unconditionally create a stack frame, getting the correct RSP on the
>  	 * stack (for x86-64) would take two instructions anyways, and RBP can
> @@ -41,16 +43,55 @@
>  	mov %_ASM_SP, %_ASM_BP
>  
>  #ifdef CONFIG_X86_64
> +#ifdef CONFIG_X86_FRED
> +	/*
> +	 * It's not necessary to change current stack level for handling IRQ/NMI
> +	 * because the state of the kernel stack is well defined in this place
> +	 * in the code, and it is known not to be deep in a bunch of nested I/O
> +	 * layer handlers that eat up the stack.
> +	 *
> +	 * Before starting to push a FRED stack frame, FRED reserves a redzone
> +	 * (for CALL emulation) and aligns RSP to a 64-byte boundary.
> +	 */
> +	sub $(FRED_CONFIG_REDZONE_AMOUNT << 6), %rsp
> +	and $FRED_STACK_FRAME_RSP_MASK, %rsp
> +
> +	/*
> +	 * A FRED stack frame has extra 16 bytes of information pushed at the
> +	 * regular stack top compared to an IDT stack frame.
> +	 */
> +	push $0		/* Reserved by FRED, must be 0 */
> +	push $0		/* FRED event data, 0 for NMI and external interrupts */
> +#else
>  	/*
>  	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
>  	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
>  	 */
>  	and  $-16, %rsp
> -	push $__KERNEL_DS
> +#endif
> +
> +	.if \fred
> +	.if \nmi
> +	mov $(2 << 32 | 2 << 48), %rax		/* NMI event type and vector */
> +	.else
> +	mov %rdi, %rax
> +	shl $32, %rax				/* External interrupt vector */
> +	.endif
> +	add $__KERNEL_DS, %rax
> +	bts $57, %rax				/* Set 64-bit mode */
> +	.else
> +	mov $__KERNEL_DS, %rax
> +	.endif
> +	push %rax

This is painfully difficult to read, and the trampolines only add to that pain.
Using macros instead of magic numbers would alleviate a small amount of pain, but
the #ifdefs and .if \fred/\nmi are the real culprits.

>  static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
> @@ -6916,14 +6916,20 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
>  {
>  	u32 intr_info = vmx_get_intr_info(vcpu);
>  	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
> -	gate_desc *desc = (gate_desc *)host_idt_base + vector;
> +	unsigned long entry_or_vector;
> +
> +#ifdef CONFIG_X86_64
> +	entry_or_vector = vector;
> +#else
> +	entry_or_vector = gate_offset((gate_desc *)host_idt_base + vector);
> +#endif

And then this is equally gross.  Rather than funnel FRED+legacy into a single
function only to split them back out, just route FRED into its own asm subroutine.
The common bits are basically the creation/destruction of the stack frame and the
CALL itself, i.e. the truly interesting bits are what's different.

Pretty much all of the #ifdeffery goes away, the helpers just need #ifdefs to
play nice with CONFIG_X86_FRED=n.  E.g. something like the below as a starting
point (it most definitely doesn't compile, and most definitely isn't 100% correct).

---
 arch/x86/kvm/vmx/vmenter.S | 72 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c     | 19 ++++++++--
 2 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 631fd7da2bc3..a6929c78e038 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -2,12 +2,14 @@
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
+#include <asm/fred.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include <asm/percpu.h>
 #include <asm/segment.h>
 #include "kvm-asm-offsets.h"
 #include "run_flags.h"
+#include "../../entry/calling.h"
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -31,6 +33,62 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+#ifdef CONFIG_X86_FRED
+.macro VMX_DO_FRED_EVENT_IRQOFF call_target cs_val
+	/*
+	 * Unconditionally create a stack frame, getting the correct RSP on the
+	 * stack (for x86-64) would take two instructions anyways, and RBP can
+	 * be used to restore RSP to make objtool happy (see below).
+	 */
+	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
+
+	/*
+	 * Don't check the FRED stack level, the call stack leading to this
+	 * helper is effectively constant and shallow (relatively speaking).
+	 *
+	 * Emulate the FRED-defined redzone and stack alignment (128 bytes and
+	 * 64 bytes respectively).
+	 */
+	sub $(FRED_CONFIG_REDZONE_AMOUNT << 6), %rsp
+	and $FRED_STACK_FRAME_RSP_MASK, %rsp
+
+	/*
+	* A FRED stack frame has extra 16 bytes of information pushed at the
+	* regular stack top compared to an IDT stack frame.
+	*/
+	push $0         /* Reserved by FRED, must be 0 */
+	push $0         /* FRED event data, 0 for NMI and external interrupts */
+	shl $32, %rax
+	orq $__KERNEL_DS | $FRED_64_BIT_MODE, %ax
+	push %rax	/* Vector (from the "caller") and DS */
+
+	push %rbp
+	pushf
+	push \cs_val
+
+	push $0 /* FRED error code, 0 for NMI and external interrupts */
+	PUSH_REGS
+
+	/* Load @pt_regs */
+	movq    %rsp, %_ASM_ARG1
+
+	call \call_target
+
+	POP_REGS
+
+	/*
+	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
+	 * the correct value.  objtool doesn't know the callee will IRET and,
+	 * without the explicit restore, thinks the stack is getting walloped.
+	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
+	 */
+	mov %_ASM_BP, %_ASM_SP
+	pop %_ASM_BP
+	RET
+.endm
+#endif
+
 .macro VMX_DO_EVENT_IRQOFF call_insn call_target
 	/*
 	 * Unconditionally create a stack frame, getting the correct RSP on the
@@ -299,6 +357,14 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
 
 SYM_FUNC_END(__vmx_vcpu_run)
 
+#ifdef CONFIG_X86_FRED
+SYM_FUNC_START(vmx_do_fred_nmi_irqoff)
+	push $FRED_NMI_ERROR_CODE
+	mov $NMI_VECTOR | $FRED_NMI_SOMETHING, %eax
+	VMX_DO_FRED_EVENT_IRQOFF call fred_entrypoint_kernel $FRED_NMI_CS_VAL
+SYM_FUNC_END(vmx_do_nmi_irqoff)
+#endif
+
 SYM_FUNC_START(vmx_do_nmi_irqoff)
 	VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
 SYM_FUNC_END(vmx_do_nmi_irqoff)
@@ -357,6 +423,12 @@ SYM_FUNC_START(vmread_error_trampoline)
 SYM_FUNC_END(vmread_error_trampoline)
 #endif
 
+#ifdef CONFIG_X86_FRED
+SYM_FUNC_START(vmx_do_fred_interrupt_irqoff)
+	mov %_ASM_ARG1, %rax
+	VMX_DO_FRED_EVENT_IRQOFF call external_interrupt
+#endif
+
 SYM_FUNC_START(vmx_do_interrupt_irqoff)
 	VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
 SYM_FUNC_END(vmx_do_interrupt_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 11080a649f60..42f50b0cc125 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6891,6 +6891,14 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
 }
 
+#ifdef CONFIG_X86_FRED
+void vmx_do_fred_interrupt_irqoff(unsigned int vector);
+void vmx_do_fred_nmi_irqoff(unsigned int vector);
+#else
+#define vmx_do_fred_interrupt_irqoff(x) BUG();
+#define vmx_do_fred_nmi_irqoff(x) BUG();
+#endif
+
 void vmx_do_interrupt_irqoff(unsigned long entry);
 void vmx_do_nmi_irqoff(void);
 
@@ -6933,14 +6941,16 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 {
 	u32 intr_info = vmx_get_intr_info(vcpu);
 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
-	gate_desc *desc = (gate_desc *)host_idt_base + vector;
 
 	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
 	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
 
 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
-	vmx_do_interrupt_irqoff(gate_offset(desc));
+	if (cpu_feature_enabled(X86_FEATURE_FRED))
+		vmx_do_fred_interrupt_irqoff(vector);
+	else
+		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));
 	kvm_after_interrupt(vcpu);
 
 	vcpu->arch.at_instruction_boundary = true;
@@ -7226,7 +7236,10 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
 	    is_nmi(vmx_get_intr_info(vcpu))) {
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
-		vmx_do_nmi_irqoff();
+		if (cpu_feature_enabled(X86_FEATURE_FRED))
+			vmx_do_fred_nmi_irqoff();
+		else
+			vmx_do_nmi_irqoff();
 		kvm_after_interrupt(vcpu);
 	}
 

base-commit: 33d1a64081c98e390e064db18738428d6fb96f95
--
Li, Xin3 April 11, 2023, 5:06 a.m. UTC | #2
> 
>  	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
> -	vmx_do_interrupt_irqoff(gate_offset(desc));
> +	if (cpu_feature_enabled(X86_FEATURE_FRED))
> +		vmx_do_fred_interrupt_irqoff(vector);
> +	else
> +		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base + vector));


external_interrupt() is always available on x86_64, even when CONFIG_X86_FRED
is not defined. I prefer to always call external_interrupt() on x86_64 for IRQ
handling, which avoids re-entering noinstr code. What do you think? Too
aggressive?

Thanks!
  Xin


>  	kvm_after_interrupt(vcpu);
Sean Christopherson April 11, 2023, 6:34 p.m. UTC | #3
On Tue, Apr 11, 2023, Xin3 Li wrote:
> > 
> >  	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
> > -	vmx_do_interrupt_irqoff(gate_offset(desc));
> > +	if (cpu_feature_enabled(X86_FEATURE_FRED))
> > +		vmx_do_fred_interrupt_irqoff(vector);
> > +	else
> > +		vmx_do_interrupt_irqoff(gate_offset((gate_desc *)host_idt_base
> 
> 
> external_interrupt() is always available on x86_64, even when CONFIG_X86_FRED
> is not defined. I prefer to always call external_interrupt() on x86_64 for IRQ
> handling, which avoids re-entering noinstr code. What do you think? Too
> aggressive?

I think it's completely orthogonal to FRED enabling.  If you or anyone else wants
to convert the non-FRED handling to external_interrupt(), then do so after FRED
lands, or at the very least in a separate patch after enabling FRED in KVM.
Li, Xin3 April 11, 2023, 10:50 p.m. UTC | #4
> > external_interrupt() is always available on x86_64, even when CONFIG_X86_FRED
> > is not defined. I prefer to always call external_interrupt() on x86_64 for IRQ
> > handling, which avoids re-entering noinstr code. What do you think? Too
> > aggressive?
> 
> I think it's completely orthogonal to FRED enabling.  If you or anyone else wants
> to convert the non-FRED handling to external_interrupt(), then do so after FRED
> lands, or at the very least in a separate patch after enabling FRED in KVM.

That sounds like a reasonable plan.
Li, Xin3 April 12, 2023, 6:26 p.m. UTC | #5
> And then this is equally gross.  Rather than funnel FRED+legacy into a single
> function only to split them back out, just route FRED into its own asm subroutine.
> The common bits are basically the creation/destruction of the stack frame and
> the CALL itself, i.e. the truly interesting bits are what's different.

I'm trying to catch up with you but am still confused.

Because a FRED stack frame always contains an error code pushed after RIP,
the FRED entry code doesn't push any error code.

Thus I introduced a trampoline, which is called to have the return
instruction address pushed first. Then the trampoline code pushes an error
code (0 for both IRQ and NMI) and jumps to fred_entrypoint_kernel() for NMI
handling or calls external_interrupt() for IRQ handling.

The return RIP is used to return from fred_entrypoint_kernel(), but not
external_interrupt().


> Pretty much all of the #ifdeffery goes away, the helpers just need #ifdefs to play
> nice with CONFIG_X86_FRED=n.  E.g. something like the below as a starting point
> (it most definitely doesn't compile, and most definitely isn't 100% correct).
> 
> ---
>  arch/x86/kvm/vmx/vmenter.S | 72
> ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/vmx.c     | 19 ++++++++--
>  2 files changed, 88 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index
> 631fd7da2bc3..a6929c78e038 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -2,12 +2,14 @@
>  #include <linux/linkage.h>
>  #include <asm/asm.h>
>  #include <asm/bitsperlong.h>
> +#include <asm/fred.h>
>  #include <asm/kvm_vcpu_regs.h>
>  #include <asm/nospec-branch.h>
>  #include <asm/percpu.h>
>  #include <asm/segment.h>
>  #include "kvm-asm-offsets.h"
>  #include "run_flags.h"
> +#include "../../entry/calling.h"
> 
>  #define WORD_SIZE (BITS_PER_LONG / 8)
> 
> @@ -31,6 +33,62 @@
>  #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
>  #endif
> 
> +#ifdef CONFIG_X86_FRED
> +.macro VMX_DO_FRED_EVENT_IRQOFF call_target cs_val
> +	/*
> +	 * Unconditionally create a stack frame, getting the correct RSP on the
> +	 * stack (for x86-64) would take two instructions anyways, and RBP can
> +	 * be used to restore RSP to make objtool happy (see below).
> +	 */
> +	push %_ASM_BP
> +	mov %_ASM_SP, %_ASM_BP
> +
> +	/*
> +	 * Don't check the FRED stack level, the call stack leading to this
> +	 * helper is effectively constant and shallow (relatively speaking).
> +	 *
> +	 * Emulate the FRED-defined redzone and stack alignment (128 bytes and
> +	 * 64 bytes respectively).
> +	 */
> +	sub $(FRED_CONFIG_REDZONE_AMOUNT << 6), %rsp
> +	and $FRED_STACK_FRAME_RSP_MASK, %rsp
> +
> +	/*
> +	* A FRED stack frame has extra 16 bytes of information pushed at the
> +	* regular stack top compared to an IDT stack frame.
> +	*/
> +	push $0         /* Reserved by FRED, must be 0 */
> +	push $0         /* FRED event data, 0 for NMI and external interrupts */
> +	shl $32, %rax
> +	orq $__KERNEL_DS | $FRED_64_BIT_MODE, %ax
> +	push %rax	/* Vector (from the "caller") and DS */
> +
> +	push %rbp
> +	pushf
> +	push \cs_val

We need to push the RIP of the next instruction here. Or are you suggesting
we don't need to care about it because it may not be used to return from the
callee?

As mentioned above, the return RIP is used when returning from NMI handling.

Or I totally missed a key idea to build a FRED stack frame?

Thanks!
  Xin

> +	push $0 /* FRED error code, 0 for NMI and external interrupts */
> +	PUSH_REGS
> +
> +	/* Load @pt_regs */
> +	movq    %rsp, %_ASM_ARG1
> +
> +	call \call_target
> +
> +	POP_REGS
> +
> +	/*
> +	 * "Restore" RSP from RBP, even though IRET has already unwound RSP
> to
> +	 * the correct value.  objtool doesn't know the callee will IRET and,
> +	 * without the explicit restore, thinks the stack is getting walloped.
> +	 * Using an unwind hint is problematic due to x86-64's dynamic
> alignment.
> +	 */
> +	mov %_ASM_BP, %_ASM_SP
> +	pop %_ASM_BP
> +	RET
> +.endm
> +#endif
> +
Sean Christopherson April 12, 2023, 7:37 p.m. UTC | #6
On Wed, Apr 12, 2023, Xin3 Li wrote:
> 
> > And then this is equally gross.  Rather than funnel FRED+legacy into a single
> > function only to split them back out, just route FRED into its own asm subroutine.
> > The common bits are basically the creation/destruction of the stack frame and
> > the CALL itself, i.e. the truly interesting bits are what's different.
> 
> I'm trying to catch up with you but am still confused.
> 
> Because a FRED stack frame always contains an error code pushed after RIP,
> the FRED entry code doesn't push any error code.
> 
> Thus I introduced a trampoline, which is called to have the return
> instruction address pushed first. Then the trampoline code pushes an error
> code (0 for both IRQ and NMI) and jumps to fred_entrypoint_kernel() for NMI
> handling or calls external_interrupt() for IRQ handling.
> 
> The return RIP is used to return from fred_entrypoint_kernel(), but not
> external_interrupt().

...

> > +	/*
> > +	* A FRED stack frame has extra 16 bytes of information pushed at the
> > +	* regular stack top compared to an IDT stack frame.
> > +	*/
> > +	push $0         /* Reserved by FRED, must be 0 */
> > +	push $0         /* FRED event data, 0 for NMI and external interrupts */
> > +	shl $32, %rax
> > +	orq $__KERNEL_DS | $FRED_64_BIT_MODE, %ax
> > +	push %rax	/* Vector (from the "caller") and DS */
> > +
> > +	push %rbp
> > +	pushf
> > +	push \cs_val
> 
> We need to push the RIP of the next instruction here. Or are you suggesting
> we don't need to care about it because it may not be used to return from the
> callee?

...

> > +	push $0 /* FRED error code, 0 for NMI and external interrupts */
> > +	PUSH_REGS
> > +
> > +	/* Load @pt_regs */
> > +	movq    %rsp, %_ASM_ARG1
> > +
> > +	call \call_target

The CALL here would push RIP; I missed/forgot the detail that the error code needs
to be pushed _after_ RIP, not before.

Unless CET complains, there's no need for a trampoline: just LEA+PUSH the return
RIP, PUSH the error code, and JMP to the handler.  IMO, that isn't any weirder than
a trampoline, and it's a bit more obviously weird, e.g. the LEA+PUSH can have a nice
big comment.
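
A rough, untested sketch of that shape (hypothetical, purely to illustrate
the idea):

	lea	1f(%rip), %rax
	push	%rax			/* return RIP of the FRED stack frame */
	push	$0			/* FRED error code, 0 for NMI */
	jmp	fred_entrypoint_kernel	/* ERETS in the entry code returns to 1f */
1: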

Patch

diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S
index efe2bcd11273..de74ab97ff00 100644
--- a/arch/x86/entry/entry_64_fred.S
+++ b/arch/x86/entry/entry_64_fred.S
@@ -59,3 +59,4 @@  SYM_CODE_START_NOALIGN(fred_entrypoint_kernel)
 	FRED_EXIT
 	ERETS
 SYM_CODE_END(fred_entrypoint_kernel)
+EXPORT_SYMBOL(fred_entrypoint_kernel)
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index b1a98fa38828..076bf8dee702 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -12,6 +12,7 @@ 
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
 #include <asm/asm.h>
+#include <asm/fred.h>
 #include <asm/gsseg.h>
 
 #ifndef CONFIG_X86_CMPXCHG64
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index f7caf3b2f3f7..d00b9cab6aa6 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -129,6 +129,7 @@  DECLARE_FRED_HANDLER(fred_exc_machine_check);
  * The actual assembly entry and exit points
  */
 extern __visible void fred_entrypoint_user(void);
+extern __visible void fred_entrypoint_kernel(void);
 
 /*
  * Initialization
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 612b3d6fec53..017b95624325 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -58,4 +58,6 @@  typedef DECLARE_SYSTEM_INTERRUPT_HANDLER((*system_interrupt_handler));
 
 system_interrupt_handler get_system_interrupt_handler(unsigned int i);
 
+int external_interrupt(struct pt_regs *regs);
+
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 73471053ed02..0f1fcd53cb52 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -1573,6 +1573,11 @@  int external_interrupt(struct pt_regs *regs)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+/* For KVM VMX to handle IRQs in IRQ induced VM exits. */
+EXPORT_SYMBOL_GPL(external_interrupt);
+#endif
+
 #endif /* CONFIG_X86_64 */
 
 void __init install_system_interrupt_handler(unsigned int n, const void *asm_addr, const void *addr)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 631fd7da2bc3..f2e1f8e61be9 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -2,12 +2,14 @@ 
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
+#include <asm/fred.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include <asm/percpu.h>
 #include <asm/segment.h>
 #include "kvm-asm-offsets.h"
 #include "run_flags.h"
+#include "../../entry/calling.h"
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -31,7 +33,7 @@ 
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
-.macro VMX_DO_EVENT_IRQOFF call_insn call_target
+.macro VMX_DO_EVENT_IRQOFF call_insn call_target fred=0 nmi=0
 	/*
 	 * Unconditionally create a stack frame, getting the correct RSP on the
 	 * stack (for x86-64) would take two instructions anyways, and RBP can
@@ -41,16 +43,55 @@ 
 	mov %_ASM_SP, %_ASM_BP
 
 #ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_FRED
+	/*
+	 * It's not necessary to change current stack level for handling IRQ/NMI
+	 * because the state of the kernel stack is well defined in this place
+	 * in the code, and it is known not to be deep in a bunch of nested I/O
+	 * layer handlers that eat up the stack.
+	 *
+	 * Before starting to push a FRED stack frame, FRED reserves a redzone
+	 * (for CALL emulation) and aligns RSP to a 64-byte boundary.
+	 */
+	sub $(FRED_CONFIG_REDZONE_AMOUNT << 6), %rsp
+	and $FRED_STACK_FRAME_RSP_MASK, %rsp
+
+	/*
+	 * A FRED stack frame has extra 16 bytes of information pushed at the
+	 * regular stack top compared to an IDT stack frame.
+	 */
+	push $0		/* Reserved by FRED, must be 0 */
+	push $0		/* FRED event data, 0 for NMI and external interrupts */
+#else
 	/*
 	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
 	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
 	 */
 	and  $-16, %rsp
-	push $__KERNEL_DS
+#endif
+
+	.if \fred
+	.if \nmi
+	mov $(2 << 32 | 2 << 48), %rax		/* NMI event type and vector */
+	.else
+	mov %rdi, %rax
+	shl $32, %rax				/* External interrupt vector */
+	.endif
+	add $__KERNEL_DS, %rax
+	bts $57, %rax				/* Set 64-bit mode */
+	.else
+	mov $__KERNEL_DS, %rax
+	.endif
+	push %rax
+
 	push %rbp
 #endif
 	pushf
-	push $__KERNEL_CS
+	mov $__KERNEL_CS, %_ASM_AX
+	.if \fred && \nmi
+	bts $28, %_ASM_AX			/* Set the NMI bit */
+	.endif
+	push %_ASM_AX
 	\call_insn \call_target
 
 	/*
@@ -299,8 +340,19 @@  SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
 
 SYM_FUNC_END(__vmx_vcpu_run)
 
+SYM_CODE_START(vmx_do_nmi_trampoline)
+#ifdef CONFIG_X86_FRED
+	ALTERNATIVE "jmp .Lno_errorcode_push", "", X86_FEATURE_FRED
+	push $0		/* FRED error code, 0 for NMI */
+	jmp fred_entrypoint_kernel
+#endif
+
+.Lno_errorcode_push:
+	jmp asm_exc_nmi_kvm_vmx
+SYM_CODE_END(vmx_do_nmi_trampoline)
+
 SYM_FUNC_START(vmx_do_nmi_irqoff)
-	VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
+	VMX_DO_EVENT_IRQOFF call vmx_do_nmi_trampoline fred=1 nmi=1
 SYM_FUNC_END(vmx_do_nmi_irqoff)
 
 
@@ -357,6 +409,24 @@  SYM_FUNC_START(vmread_error_trampoline)
 SYM_FUNC_END(vmread_error_trampoline)
 #endif
 
+#ifdef CONFIG_X86_64
+SYM_CODE_START(vmx_do_interrupt_trampoline)
+	push $0	/* FRED error code, 0 for NMI and external interrupts */
+	PUSH_REGS
+
+	movq	%rsp, %rdi	/* %rdi -> pt_regs */
+	call external_interrupt
+
+	POP_REGS
+	addq $8,%rsp		/* Drop FRED error code */
+	RET
+SYM_CODE_END(vmx_do_interrupt_trampoline)
+#endif
+
 SYM_FUNC_START(vmx_do_interrupt_irqoff)
+#ifdef CONFIG_X86_64
+	VMX_DO_EVENT_IRQOFF call vmx_do_interrupt_trampoline fred=1
+#else
 	VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
+#endif
 SYM_FUNC_END(vmx_do_interrupt_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d2d6e1b6c788..d85bcfd191b7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6874,7 +6874,7 @@  static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
 }
 
-void vmx_do_interrupt_irqoff(unsigned long entry);
+void vmx_do_interrupt_irqoff(unsigned long entry_or_vector);
 void vmx_do_nmi_irqoff(void);
 
 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
@@ -6916,14 +6916,20 @@  static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 {
 	u32 intr_info = vmx_get_intr_info(vcpu);
 	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
-	gate_desc *desc = (gate_desc *)host_idt_base + vector;
+	unsigned long entry_or_vector;
+
+#ifdef CONFIG_X86_64
+	entry_or_vector = vector;
+#else
+	entry_or_vector = gate_offset((gate_desc *)host_idt_base + vector);
+#endif
 
 	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
 	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
 
 	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
-	vmx_do_interrupt_irqoff(gate_offset(desc));
+	vmx_do_interrupt_irqoff(entry_or_vector);
 	kvm_after_interrupt(vcpu);
 
 	vcpu->arch.at_instruction_boundary = true;