@@ -3399,6 +3399,14 @@ static __no_kcsan fastpath_t svm_vcpu_ru
*/
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+ /*
+ * Tell context tracking that this CPU is about to enter guest
+ * mode. This has to be after x86_spec_ctrl_set_guest() because
+ * that can take locks (lockdep needs RCU) and calls into other
+ * kernel code that expects RCU to be watching.
+ */
+ guest_enter_irqoff();
+
__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
#ifdef CONFIG_X86_64
@@ -3409,6 +3417,14 @@ static __no_kcsan fastpath_t svm_vcpu_ru
loadsegment(gs, svm->host.gs);
#endif
#endif
+ /*
+ * Tell context tracking that this CPU is back.
+ *
+ * This needs to be done before the code below, as native_read_msr()
+ * contains a tracepoint and x86_spec_ctrl_restore_host() calls
+ * into other kernel code that expects RCU to be watching.
+ */
+ guest_exit_irqoff();
/*
* We do not use IBRS in the kernel. If this vCPU has used the
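Taken together, the two SVM hunks bracket the low-level world switch with context tracking. A minimal sketch of the resulting order in svm_vcpu_run(), with the segment reloads and the conditional SPEC_CTRL read elided (a simplification, not the verbatim function):

	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
	guest_enter_irqoff();	/* RCU stops watching this CPU (with context tracking on) */
	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
	/* ... reload host GS/segments on CONFIG_X86_64 ... */
	guest_exit_irqoff();	/* RCU watches again; tracepoints are safe from here on */
	/* ... save guest SPEC_CTRL if needed, then restore the host value ... */
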
@@ -6728,6 +6728,11 @@ static fastpath_t vmx_vcpu_run(struct kv
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+ /*
+ * Tell context tracking that this CPU is about to enter guest mode.
+ */
+ guest_enter_irqoff();
+
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
@@ -6743,6 +6748,11 @@ static fastpath_t vmx_vcpu_run(struct kv
vcpu->arch.cr2 = read_cr2();
/*
+ * Tell context tracking that this CPU is back.
+ */
+ guest_exit_irqoff();
+
+ /*
* We do not use IBRS in the kernel. If this vCPU has used the
* SPEC_CTRL MSR it may have left it on; save the value and
* turn it off. This is much more efficient than blindly adding
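The VMX hunks apply the same bracketing. The exit-side ordering constraint is the interesting part: native_read_msr() contains a tracepoint, and tracepoints need RCU, so guest_exit_irqoff() must run before the SPEC_CTRL save/restore. A rough sketch of the resulting vmx_vcpu_run() order, under the same simplifications as the SVM sketch above:

	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
	guest_enter_irqoff();
	/* L1D Flush includes CPU buffer clear to mitigate MDS */
	if (static_branch_unlikely(&vmx_l1d_should_flush))
		vmx_l1d_flush(vcpu);
	/* ... actual VMLAUNCH/VMRESUME world switch ... */
	vcpu->arch.cr2 = read_cr2();
	guest_exit_irqoff();	/* before native_read_msr() and its tracepoint can run */
	/* ... save guest SPEC_CTRL if needed, then restore the host value ... */
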
@@ -8501,7 +8501,6 @@ static int vcpu_enter_guest(struct kvm_v
}
trace_kvm_entry(vcpu->vcpu_id);
- guest_enter_irqoff();
fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
@@ -8563,7 +8562,6 @@ static int vcpu_enter_guest(struct kvm_v
local_irq_disable();
kvm_after_interrupt(vcpu);
- guest_exit_irqoff();
if (lapic_in_kernel(vcpu)) {
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
if (delta != S64_MIN) {
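With both vendors handling context tracking themselves, the common vcpu_enter_guest() no longer toggles it. Assuming the surrounding code of this era invokes the vendor entry point as kvm_x86_ops.run(vcpu) (an assumption, the call site is not part of these hunks), the remaining flow reads roughly:

	trace_kvm_entry(vcpu->vcpu_id);
	fpregs_assert_state_consistent();
	/* ... */
	exit_fastpath = kvm_x86_ops.run(vcpu);	/* guest_enter/exit_irqoff() happen in here now */
	/* ... re-enable and handle host interrupts raised by the VM exit ... */
	local_irq_disable();
	kvm_after_interrupt(vcpu);	/* interrupt handling above ran with RCU watching */
	/* lapic timer advance accounting follows, outside guest context */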