@@ -107,8 +107,6 @@ struct vcpu_svm {
u32 *msrpm;
struct nested_state nested;
-
- bool nmi_singlestep;
};
/* enable NPT for AMD64 and X86 with PAE */
@@ -1075,9 +1073,6 @@ static void update_db_intercept(struct kvm_vcpu *vcpu)
svm->vmcb->control.intercept_exceptions &=
~((1 << DB_VECTOR) | (1 << BP_VECTOR));
- if (svm->nmi_singlestep)
- svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
-
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
if (vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -1213,20 +1208,11 @@ static int db_interception(struct vcpu_svm *svm)
struct kvm_run *kvm_run = svm->vcpu.run;
if (!(svm->vcpu.guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
- !svm->nmi_singlestep) {
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
return 1;
}
- if (svm->nmi_singlestep) {
- svm->nmi_singlestep = false;
- if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
- svm->vmcb->save.rflags &=
- ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
- update_db_intercept(&svm->vcpu);
- }
-
if (svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -2471,6 +2457,17 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
+ /*
+ * Inject the NMI before the IRET has completed, but defer its
+ * delivery by one instruction with the help of the interrupt
+ * shadow. This works at least as long as the IRET does not
+ * trigger an exception.
+ */
+ svm->vcpu.arch.hflags &= ~HF_IRET_MASK;
+ svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
+ }
+
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
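
For reference, HF_IRET_MASK is set by the IRET intercept handler elsewhere in svm.c. Roughly, iret_interception() looked like this around this kernel version (a sketch for illustration, not part of this patch; details differ between releases):

static int iret_interception(struct vcpu_svm *svm)
{
	/*
	 * The guest's NMI handler is returning: account the event, drop
	 * the one-shot IRET intercept and note that the IRET itself has
	 * not executed yet.
	 */
	++svm->vcpu.stat.nmi_window_exits;
	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	return 1;
}

With HF_IRET_MASK set, the branch added above arms the interrupt shadow so that a freshly injected NMI is only delivered once the intercepted IRET has actually retired.
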
@@ -2576,18 +2573,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
- == HF_NMI_MASK)
- return; /* IRET will cause a vm exit */
-
- /* Something prevents NMI from been injected. Single step over
- possible problem (IRET or exception injection or interrupt
- shadow) */
- svm->nmi_singlestep = true;
- svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
- update_db_intercept(vcpu);
+ /* The VM exit on IRET was already armed when the NMI was injected */
}
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
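
The now-empty enable_nmi_window() relies on NMIs staying blocked via HF_NMI_MASK until the armed IRET intercept fires. Whether an NMI may be injected at all is decided by svm_nmi_allowed(), which at the time read roughly as follows (again a sketch, version-dependent):

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	/*
	 * An NMI may be injected only while no interrupt shadow is
	 * active and the previous NMI has been completed, i.e.
	 * HF_NMI_MASK has been cleared again.
	 */
	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	       !(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

Once the IRET exit has been handled and HF_NMI_MASK is dropped, the next pending NMI can be injected directly, so no single-stepping over the IRET is needed any more.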