@@ -3595,6 +3595,9 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return false;
+ if (is_vnmi_enabled(svm) && is_vnmi_pending_set(svm))
+ return true;
+
ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
(vcpu->arch.hflags & HF_NMI_MASK);
@@ -3732,6 +3735,9 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_vnmi_enabled(svm))
+ return;
+
if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
return; /* IRET will cause a vm exit */
@@ -583,6 +583,17 @@ static inline void clear_vnmi_mask(struct vcpu_svm *svm)
vmcb->control.int_ctl &= ~V_NMI_MASK;
}
+
+static inline bool is_vnmi_pending_set(struct vcpu_svm *svm)
+{
+	struct vmcb *vmcb = get_vnmi_vmcb(svm);	/* may be NULL — guarded below */
+
+	if (vmcb)
+		return !!(vmcb->control.int_ctl & V_NMI_PENDING);	/* virtual NMI awaiting injection */
+	else
+		return false;	/* no vNMI vmcb: nothing can be pending */
+}
+
/* svm.c */
#define MSR_INVALID 0xffffffffU