@@ -3598,6 +3598,9 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
 		return false;
 
+	if (is_vnmi_enabled(svm) && is_vnmi_pending_set(svm))
+		return true;
+
 	ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
 	      (vcpu->arch.hflags & HF_NMI_MASK);
@@ -3734,6 +3737,9 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (is_vnmi_enabled(svm) && is_vnmi_pending_set(svm))
+		return;
+
 	if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
 		return; /* IRET will cause a vm exit */
@@ -584,6 +584,16 @@ static inline void clear_vnmi_mask(struct vcpu_svm *svm)
 		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
 }
 
+static inline bool is_vnmi_pending_set(struct vcpu_svm *svm)
+{
+	struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+	if (vmcb)
+		return !!(vmcb->control.int_ctl & V_NMI_PENDING);
+	else
+		return false;
+}
+
 /* svm.c */
 #define MSR_INVALID			0xffffffffU
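For readers without the rest of the series applied, here is a minimal, self-contained sketch of the pending-bit test these hunks introduce. The struct layout, the get_vnmi_vmcb() mock, and the V_NMI_PENDING bit value below are illustrative stand-ins (the real definitions come from the earlier patches in this series and from arch/x86/include/asm/svm.h); only the helper's logic mirrors the hunk above.

#include <stdbool.h>
#include <stdio.h>

#define V_NMI_PENDING	(1u << 11)	/* illustrative bit value only */

/* Minimal mocks of the pieces of struct vmcb / struct vcpu_svm used here. */
struct vmcb_control_area { unsigned int int_ctl; };
struct vmcb { struct vmcb_control_area control; };
struct vcpu_svm { struct vmcb *vmcb; bool vnmi_enabled; };

/* Stand-in for get_vnmi_vmcb(): yields the vmcb only when vNMI is usable. */
static struct vmcb *get_vnmi_vmcb(struct vcpu_svm *svm)
{
	return svm->vnmi_enabled ? svm->vmcb : NULL;
}

/* Same shape as the helper the patch adds to svm.h. */
static bool is_vnmi_pending_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_PENDING);
	else
		return false;
}

int main(void)
{
	struct vmcb vmcb = { .control = { .int_ctl = V_NMI_PENDING } };
	struct vcpu_svm svm = { .vmcb = &vmcb, .vnmi_enabled = true };

	/* With V_NMI_PENDING set, a new NMI is reported as blocked. */
	printf("vNMI pending: %d\n", is_vnmi_pending_set(&svm));	/* 1 */

	vmcb.control.int_ctl &= ~V_NMI_PENDING;
	printf("vNMI pending: %d\n", is_vnmi_pending_set(&svm));	/* 0 */
	return 0;
}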
In the vNMI case, report that NMI injection is not allowed when V_NMI_PENDING is set, which means a virtual NMI is already pended for the guest to process while the guest is busy handling the current virtual NMI. The guest will first finish handling the current virtual NMI and then take the pended event without a VM exit.

Signed-off-by: Santosh Shukla <santosh.shukla@amd.com>
---
v3:
- Added the is_vnmi_pending_set API to check the vNMI pending state.
- Replaced the is_vnmi_mask_set check with is_vnmi_pending_set.

v2:
- Moved the vNMI check after is_guest_mode() in svm_nmi_blocked().
- Removed the is_vnmi_mask_set check from svm_enable_nmi_window(),
  as it was a redundant check.

 arch/x86/kvm/svm/svm.c |  6 ++++++
 arch/x86/kvm/svm/svm.h | 10 ++++++++++
 2 files changed, 16 insertions(+)
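To illustrate why svm_enable_nmi_window() can simply return here, below is a toy model of the V_NMI_PENDING / V_NMI_MASK pair at the guest's IRET. The bit values and the vnmi_hw_iret() function are made up for this sketch and deliberately simplified; it only shows the behaviour the commit message describes, namely that a pended virtual NMI is delivered by hardware without a VM exit, so KVM never needs to open an NMI window.

#include <stdio.h>

/* Illustrative bit values; the real ones are defined by this series. */
#define V_NMI_PENDING	(1u << 11)
#define V_NMI_MASK	(1u << 12)

/* Toy model of the CPU at the guest's IRET from an NMI handler:
 * unmask virtual NMIs and, if one is pended, deliver it immediately,
 * re-setting the mask, all without a VM exit. */
static unsigned int vnmi_hw_iret(unsigned int int_ctl)
{
	int_ctl &= ~V_NMI_MASK;			/* current NMI finished */
	if (int_ctl & V_NMI_PENDING) {
		int_ctl &= ~V_NMI_PENDING;	/* take the pended NMI */
		int_ctl |= V_NMI_MASK;		/* now handling it */
		printf("pended NMI delivered, no VM exit\n");
	}
	return int_ctl;
}

int main(void)
{
	/* Guest is inside an NMI handler and a second NMI is pended. */
	unsigned int int_ctl = V_NMI_MASK | V_NMI_PENDING;

	int_ctl = vnmi_hw_iret(int_ctl);	/* second NMI taken here */
	int_ctl = vnmi_hw_iret(int_ctl);	/* nothing left to deliver */
	printf("final int_ctl: %#x\n", int_ctl);
	return 0;
}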