| Message ID | 20090419131155.GF10126@redhat.com (mailing list archive) |
|---|---|
| State | New, archived |
Gleb Natapov wrote:
> On Fri, Apr 17, 2009 at 03:12:57PM +0000, Dmitry Eremin-Solenikov wrote:
>>
>> This patch does expose some problems on real HW. The first NMI
>> completes w/o problems. However, if I try to boot the kernel w/
>> nmi_watchdog=1 or to trigger two NMIs from the monitor, the kernel is stuck
>> somewhere.
>>
> Can you try this patch instead of patch13:

Seems to work.
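For anyone reproducing the test above: nmi_watchdog=1 goes on the guest kernel
command line, and single NMIs can be injected by hand with the QEMU monitor's
nmi command (a sketch; QEMU builds of this era take a CPU index argument,
later versions inject on every vCPU, so check your version):

	(qemu) nmi 0
	(qemu) nmi 0      # the second NMI is what exercises the IRET/unmask path
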
Gleb Natapov wrote:
> On Fri, Apr 17, 2009 at 03:12:57PM +0000, Dmitry Eremin-Solenikov wrote:
>> This patch does expose some problems on real HW. The first NMI completes w/o
>> problems. However, if I try to boot the kernel w/ nmi_watchdog=1 or to trigger
>> two NMIs from the monitor, the kernel is stuck somewhere.
>>
> Can you try this patch instead of patch13:
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 8b6f6e9..057a612 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -766,6 +766,7 @@ enum {
>  #define HF_GIF_MASK		(1 << 0)
>  #define HF_HIF_MASK		(1 << 1)
>  #define HF_VINTR_MASK		(1 << 2)
> +#define HF_NMI_MASK		(1 << 3)
>
>  /*
>   * Hardware virtualization extension instructions may fault if a
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index c605477..0a2b3f1 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1834,6 +1834,13 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
>  	return 1;
>  }
>
> +static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
> +{
> +	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
> +	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;

Two minor issues:

	++vcpu->stat.nmi_window_exits;

> +	return 1;
> +}
> +
>  static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
>  {
>  	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
> @@ -2111,6 +2118,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
>  	[SVM_EXIT_VINTR]			= interrupt_window_interception,
>  	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
>  	[SVM_EXIT_CPUID]			= cpuid_interception,
> +	[SVM_EXIT_IRET]				= iret_interception,
>  	[SVM_EXIT_INVD]				= emulate_on_interception,
>  	[SVM_EXIT_HLT]				= halt_interception,
>  	[SVM_EXIT_INVLPG]			= invlpg_interception,
> @@ -2218,6 +2226,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
>  		new_asid(svm, svm_data);
>  }
>
> +static void svm_inject_nmi(struct vcpu_svm *svm)
> +{
> +	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
> +	svm->vcpu.arch.hflags |= HF_NMI_MASK;
> +	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);

and:

	++svm->vcpu.stat.nmi_injections;

> +}
>
>  static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
>  {
> @@ -2269,6 +2283,14 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
>  		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
>  }
>
> +static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	struct vmcb *vmcb = svm->vmcb;
> +	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
> +		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
> +}
> +
>  static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -2284,16 +2306,35 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
>  	svm_inject_irq(to_svm(vcpu), 0x0);
>  }
>
> +static void enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +
> +	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
> +		enable_irq_window(vcpu);
> +}
> +
>  static void svm_intr_inject(struct kvm_vcpu *vcpu)
>  {
>  	/* try to reinject previous events if any */
> +	if (vcpu->arch.nmi_injected) {
> +		svm_inject_nmi(to_svm(vcpu));
> +		return;
> +	}
> +
>  	if (vcpu->arch.interrupt.pending) {
>  		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
>  		return;
>  	}
>
>  	/* try to inject new event if pending */
> -	if (kvm_cpu_has_interrupt(vcpu)) {
> +	if (vcpu->arch.nmi_pending) {
> +		if (svm_nmi_allowed(vcpu)) {
> +			vcpu->arch.nmi_pending = false;
> +			vcpu->arch.nmi_injected = true;
> +			svm_inject_nmi(vcpu);
> +		}
> +	} else if (kvm_cpu_has_interrupt(vcpu)) {
>  		if (svm_interrupt_allowed(vcpu)) {
>  			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
>  			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
> @@ -2312,7 +2353,10 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>
>  	svm_intr_inject(vcpu);
>
> -	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
> +	/* enable NMI/IRQ window open exits if needed */
> +	if (vcpu->arch.nmi_pending)
> +		enable_nmi_window(vcpu);
> +	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
>  		enable_irq_window(vcpu);
>
>  out:
> --
> 	Gleb.

Jan
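For clarity, this is how the two handlers from the patch look once Jan's
counters are folded in. A sketch of the obvious merge rather than a literal
quote of a follow-up patch; note that inside iret_interception() the vcpu is
only reachable through svm, so the first counter becomes
svm->vcpu.stat.nmi_window_exits:

static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/* count NMI-window exits, as on the VMX side */
	++svm->vcpu.stat.nmi_window_exits;
	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
	return 1;
}

static void svm_inject_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	svm->vcpu.arch.hflags |= HF_NMI_MASK;
	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
	/* count injected NMIs */
	++svm->vcpu.stat.nmi_injections;
}
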
On Mon, Apr 20, 2009 at 05:50:01PM +0200, Jan Kiszka wrote:
> > +static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
> > +{
> > +	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
> > +	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
>
> Two minor issues:
>
> 	++vcpu->stat.nmi_window_exits;
>
> [...]
>
> > +static void svm_inject_nmi(struct vcpu_svm *svm)
> > +{
> > +	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
> > +	svm->vcpu.arch.hflags |= HF_NMI_MASK;
> > +	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
>
> and:
>
> 	++svm->vcpu.stat.nmi_injections;
>
Added both. Thanks.

--
	Gleb.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8b6f6e9..057a612 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -766,6 +766,7 @@ enum {
 #define HF_GIF_MASK		(1 << 0)
 #define HF_HIF_MASK		(1 << 1)
 #define HF_VINTR_MASK		(1 << 2)
+#define HF_NMI_MASK		(1 << 3)
 
 /*
  * Hardware virtualization extension instructions may fault if a
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c605477..0a2b3f1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1834,6 +1834,13 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+	return 1;
+}
+
 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
@@ -2111,6 +2118,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
 	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
+	[SVM_EXIT_IRET]				= iret_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
@@ -2218,6 +2226,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
 		new_asid(svm, svm_data);
 }
 
+static void svm_inject_nmi(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+	svm->vcpu.arch.hflags |= HF_NMI_MASK;
+	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+}
 
 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
@@ -2269,6 +2283,14 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
 }
 
+static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
+	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+}
+
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -2284,16 +2306,35 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 	svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
+		enable_irq_window(vcpu);
+}
+
 static void svm_intr_inject(struct kvm_vcpu *vcpu)
 {
 	/* try to reinject previous events if any */
+	if (vcpu->arch.nmi_injected) {
+		svm_inject_nmi(to_svm(vcpu));
+		return;
+	}
+
 	if (vcpu->arch.interrupt.pending) {
 		svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
 		return;
 	}
 
 	/* try to inject new event if pending */
-	if (kvm_cpu_has_interrupt(vcpu)) {
+	if (vcpu->arch.nmi_pending) {
+		if (svm_nmi_allowed(vcpu)) {
+			vcpu->arch.nmi_pending = false;
+			vcpu->arch.nmi_injected = true;
+			svm_inject_nmi(vcpu);
+		}
+	} else if (kvm_cpu_has_interrupt(vcpu)) {
 		if (svm_interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
 			svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
@@ -2312,7 +2353,10 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	svm_intr_inject(vcpu);
 
-	if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+	/* enable NMI/IRQ window open exits if needed */
+	if (vcpu->arch.nmi_pending)
+		enable_nmi_window(vcpu);
+	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
 		enable_irq_window(vcpu);
 
 out:
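
Why intercepting IRET emulates an NMI window: x86 blocks further NMIs from the
moment one is delivered until the handler's IRET, and SVM (unlike VMX) has no
dedicated NMI-window exit, so the patch mirrors that blocked state in
HF_NMI_MASK and arms the IRET intercept to learn when it clears. The following
stand-alone model only illustrates that life cycle; none of it is kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool nmi_masked;		/* mirrors HF_NMI_MASK            */
static bool intercept_iret;	/* mirrors the INTERCEPT_IRET bit */

/* svm_inject_nmi(): deliver an NMI; further NMIs are now blocked */
static void inject_nmi(void)
{
	nmi_masked = true;
	intercept_iret = true;
	printf("NMI injected\n");
}

/* iret_interception(): the guest handler returned, NMIs reopen */
static void guest_iret(void)
{
	if (intercept_iret) {
		intercept_iret = false;
		nmi_masked = false;
	}
}

/* svm_nmi_allowed(), minus the interrupt-shadow check */
static bool nmi_allowed(void)
{
	return !nmi_masked;
}

int main(void)
{
	inject_nmi();					/* first NMI delivered */
	printf("second NMI allowed? %d\n", nmi_allowed());	/* 0: blocked */
	guest_iret();					/* handler finishes */
	printf("second NMI allowed? %d\n", nmi_allowed());	/* 1: inject now */
	return 0;
}

The interrupt-shadow case is the one piece this model omits: when the guest
sits in the shadow of STI or MOV SS, enable_nmi_window() falls back to
enable_irq_window(), reusing the VINTR trick to get a VM exit one instruction
later, after the shadow has passed.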