@@ -6729,9 +6729,21 @@ static fastpath_t vmx_vcpu_run(struct kv
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
/*
- * Tell context tracking that this CPU is about to enter guest mode.
+ * VMENTER enables interrupts (host state), but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about
+ * it. This is the same logic as for exit_to_user_mode().
+ *
+ * This ensures that e.g. latency analysis on the host observes
+ * guest mode as interrupt enabled.
+ *
+ * guest_enter_irqoff() informs context tracking about the
+ * transition to guest mode and if enabled adjusts RCU state
+ * accordingly.
*/
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
guest_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6748,9 +6760,20 @@ static fastpath_t vmx_vcpu_run(struct kv
vcpu->arch.cr2 = read_cr2();
/*
- * Tell context tracking that this CPU is back.
+ * VMEXIT disables interrupts (host state), but tracing and lockdep
+ * have them in state 'on' as recorded before entering guest mode.
+ * Same as enter_from_user_mode().
+ *
+ * guest_exit_irqoff() restores host context and reinstates RCU if
+ * enabled and required.
+ *
+ * This needs to be done before the code below because
+ * native_read_msr() contains a tracepoint and
+ * x86_spec_ctrl_restore_host() calls into further traced code.
*/
+ lockdep_hardirqs_off(CALLER_ADDR0);
guest_exit_irqoff();
+ trace_hardirqs_off_finish();
/*
* We do not use IBRS in the kernel. If this vCPU has used the