| Message ID | 20170725044156.7605-1-andi@firstfloor.org (mailing list archive) |
|---|---|
| State | New, archived |
On 25/07/2017 06:41, Andi Kleen wrote:
> From: Andi Kleen <ak@linux.intel.com>
>
> KVM and perf have a special backdoor mechanism to report the IP for interrupts
> re-executed after vm exit. This works for the NMIs that perf normally uses.
>
> However, when perf is in timer mode it doesn't work because the timer interrupt
> doesn't get this special treatment. This is common when KVM is running
> nested in another hypervisor which may not implement the PMU, so only
> timer mode is available.
>
> Call the functions to set up the backdoor IP also for non-NMI interrupts.
>
> I renamed the functions that set up the backdoor IP reporting to be more
> appropriate for their new use. The SVM change is only compile tested.
>
> Signed-off-by: Andi Kleen <ak@linux.intel.com>
> ---
>  arch/x86/kvm/svm.c | 6 ++++--
>  arch/x86/kvm/vmx.c | 6 ++++--
>  arch/x86/kvm/x86.c | 8 ++++----
>  arch/x86/kvm/x86.h | 4 ++--
>  4 files changed, 14 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index ba9891ac5c56..41bf6a9de853 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -4872,14 +4872,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
>  	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
>
>  	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
> -		kvm_before_handle_nmi(&svm->vcpu);
> +		kvm_before_interrupt(&svm->vcpu);
>
>  	stgi();
>
>  	/* Any pending NMI will happen here */
>
>  	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
> -		kvm_after_handle_nmi(&svm->vcpu);
> +		kvm_after_interrupt(&svm->vcpu);
>
>  	sync_cr8_to_lapic(vcpu);
>
> @@ -5234,6 +5234,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
>
>  static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
>  {
> +	kvm_before_interrupt(vcpu);
>  	local_irq_enable();
>  	/*
>  	 * We must have an instruction with interrupts enabled, so
> @@ -5241,6 +5242,7 @@ static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
>  	 */
>  	asm("nop");
>  	local_irq_disable();
> +	kvm_after_interrupt(vcpu);
>  }

Can you do this directly in vcpu_enter_guest? Maybe you could even inline
the functions entirely, setting current_vcpu right after the
local_irq_disable() and clearing it after kvm_x86_ops->handle_external_intr.

This would remove the NMI code in vmx.c/svm.c, too.
Paolo

>  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index ca5d2b93385c..a02178914f6c 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8606,9 +8606,9 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
>
>  	/* We need to handle NMIs before interrupts are enabled */
>  	if (is_nmi(exit_intr_info)) {
> -		kvm_before_handle_nmi(&vmx->vcpu);
> +		kvm_before_interrupt(&vmx->vcpu);
>  		asm("int $2");
> -		kvm_after_handle_nmi(&vmx->vcpu);
> +		kvm_after_interrupt(&vmx->vcpu);
>  	}
>  }
>
> @@ -8627,6 +8627,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
>  	unsigned long tmp;
>  #endif
>
> +	kvm_before_interrupt(vcpu);
>  	vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
>  	desc = (gate_desc *)vmx->host_idt_base + vector;
>  	entry = gate_offset(*desc);
> @@ -8650,6 +8651,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
>  			[ss]"i"(__KERNEL_DS),
>  			[cs]"i"(__KERNEL_CS)
>  			);
> +	kvm_after_interrupt(vcpu);
>  	}
>  }
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 0e846f0cb83b..96dae9ca7641 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5975,17 +5975,17 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
>  	.get_guest_ip		= kvm_get_guest_ip,
>  };
>
> -void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
> +void kvm_before_interrupt(struct kvm_vcpu *vcpu)
>  {
>  	__this_cpu_write(current_vcpu, vcpu);
>  }
> -EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
> +EXPORT_SYMBOL_GPL(kvm_before_interrupt);
>
> -void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
> +void kvm_after_interrupt(struct kvm_vcpu *vcpu)
>  {
>  	__this_cpu_write(current_vcpu, NULL);
>  }
> -EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
> +EXPORT_SYMBOL_GPL(kvm_after_interrupt);
>
>  static void kvm_set_mmio_spte_mask(void)
>  {
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 612067074905..adb269538e7c 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -155,8 +155,8 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
>  	return !(kvm->arch.disabled_quirks & quirk);
>  }
>
> -void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
> -void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
> +void kvm_before_interrupt(struct kvm_vcpu *vcpu);
> +void kvm_after_interrupt(struct kvm_vcpu *vcpu);
>  void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
>  int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
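For readers skimming the thread, here is a minimal sketch of what Paolo is suggesting, assuming the rough shape of vcpu_enter_guest() in arch/x86/kvm/x86.c at the time; the elided steps are marked with comments, and this is an illustration of the proposal, not the posted patch:

```c
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	/* ... request handling, preempt_disable(), etc. elided ... */

	local_irq_disable();

	/*
	 * Inlined kvm_before_interrupt(): from here on, any NMI or IRQ
	 * that fires on this CPU is attributed to the guest by KVM's
	 * perf guest callbacks.
	 */
	__this_cpu_write(current_vcpu, vcpu);

	kvm_x86_ops->run(vcpu);

	/* ... register/state sync elided; NMIs may be taken here ... */

	kvm_x86_ops->handle_external_intr(vcpu);

	/*
	 * Inlined kvm_after_interrupt(): the re-executed interrupt has
	 * been handled, so stop attributing samples to the guest.
	 */
	__this_cpu_write(current_vcpu, NULL);

	local_irq_enable();

	/* ... exit handling elided ... */
	return 1;
}
```

With current_vcpu set across both the SVM stgi() window and the VMX "int $2" re-injection, the per-vendor kvm_before_interrupt()/kvm_after_interrupt() call sites become redundant, which is what Paolo means by removing the NMI code in vmx.c/svm.c.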
The patch as posted:

```diff
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ba9891ac5c56..41bf6a9de853 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4872,14 +4872,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-		kvm_before_handle_nmi(&svm->vcpu);
+		kvm_before_interrupt(&svm->vcpu);
 
 	stgi();
 
 	/* Any pending NMI will happen here */
 
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-		kvm_after_handle_nmi(&svm->vcpu);
+		kvm_after_interrupt(&svm->vcpu);
 
 	sync_cr8_to_lapic(vcpu);
 
@@ -5234,6 +5234,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
+	kvm_before_interrupt(vcpu);
 	local_irq_enable();
 	/*
 	 * We must have an instruction with interrupts enabled, so
@@ -5241,6 +5242,7 @@ static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 	 */
 	asm("nop");
 	local_irq_disable();
+	kvm_after_interrupt(vcpu);
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ca5d2b93385c..a02178914f6c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8606,9 +8606,9 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 
 	/* We need to handle NMIs before interrupts are enabled */
 	if (is_nmi(exit_intr_info)) {
-		kvm_before_handle_nmi(&vmx->vcpu);
+		kvm_before_interrupt(&vmx->vcpu);
 		asm("int $2");
-		kvm_after_handle_nmi(&vmx->vcpu);
+		kvm_after_interrupt(&vmx->vcpu);
 	}
 }
 
@@ -8627,6 +8627,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 	unsigned long tmp;
 #endif
 
+	kvm_before_interrupt(vcpu);
 	vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
 	desc = (gate_desc *)vmx->host_idt_base + vector;
 	entry = gate_offset(*desc);
@@ -8650,6 +8651,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			[ss]"i"(__KERNEL_DS),
 			[cs]"i"(__KERNEL_CS)
 			);
+	kvm_after_interrupt(vcpu);
 	}
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0e846f0cb83b..96dae9ca7641 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5975,17 +5975,17 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 	.get_guest_ip		= kvm_get_guest_ip,
 };
 
-void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+void kvm_before_interrupt(struct kvm_vcpu *vcpu)
 {
 	__this_cpu_write(current_vcpu, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+EXPORT_SYMBOL_GPL(kvm_before_interrupt);
 
-void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 {
 	__this_cpu_write(current_vcpu, NULL);
 }
-EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+EXPORT_SYMBOL_GPL(kvm_after_interrupt);
 
 static void kvm_set_mmio_spte_mask(void)
 {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 612067074905..adb269538e7c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -155,8 +155,8 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 	return !(kvm->arch.disabled_quirks & quirk);
 }
 
-void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
-void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_before_interrupt(struct kvm_vcpu *vcpu);
+void kvm_after_interrupt(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
```
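For context on the "backdoor mechanism" the commit message refers to: the current_vcpu per-CPU variable written above feeds KVM's perf guest-info callbacks, registered via perf_register_guest_info_callbacks(). The sketch below is reconstructed from the era's arch/x86/kvm/x86.c as context for this patch, not part of it (kvm_is_in_guest and kvm_is_user_mode do not appear in the hunks above), so treat the exact bodies as illustrative:

```c
/*
 * When an interrupt lands while current_vcpu is non-NULL, perf's
 * handler asks KVM for the guest RIP instead of using the host
 * interrupt frame, so the sample is attributed to the guest.
 */
static int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,	/* CPL check, body elided */
	.get_guest_ip		= kvm_get_guest_ip,
};
```

Before this patch, the window where current_vcpu is set only covered re-executed NMIs; the patch widens it to cover re-executed external interrupts, which is exactly what a timer-mode perf needs.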