Message ID | 1587632507-18997-2-git-send-email-wanpengli@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: VMX: Tscdeadline timer emulation fastpath |
On 23/04/20 11:01, Wanpeng Li wrote:
> +static void fast_deliver_interrupt(struct kvm_lapic *apic, int vector)
> +{
> +	struct kvm_vcpu *vcpu = apic->vcpu;
> +
> +	kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR);
> +
> +	if (vcpu->arch.apicv_active) {
> +		if (kvm_x86_ops.pi_test_and_set_pir_on(vcpu, vector))
> +			return;
> +
> +		kvm_x86_ops.sync_pir_to_irr(vcpu);
> +	} else {
> +		kvm_lapic_set_irr(vector, apic);
> +		if (kvm_cpu_has_injectable_intr(vcpu)) {
> +			if (kvm_x86_ops.interrupt_allowed(vcpu)) {
> +				kvm_queue_interrupt(vcpu,
> +					kvm_cpu_get_interrupt(vcpu), false);
> +				kvm_x86_ops.set_irq(vcpu);
> +			} else
> +				kvm_x86_ops.enable_irq_window(vcpu);
> +		}
> +	}
> +}
> +

Ok, got it now. The problem is that deliver_posted_interrupt goes through

	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
		kvm_vcpu_kick(vcpu);

Would it help to make the above

	if (vcpu != kvm_get_running_vcpu() &&
	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
		kvm_vcpu_kick(vcpu);

? If that is enough for the APICv case, it's good enough.

Paolo
On Thu, 23 Apr 2020 at 17:25, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 23/04/20 11:01, Wanpeng Li wrote:
> > +static void fast_deliver_interrupt(struct kvm_lapic *apic, int vector)
> > +{
> > +	struct kvm_vcpu *vcpu = apic->vcpu;
> > +
> > +	kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR);
> > +
> > +	if (vcpu->arch.apicv_active) {
> > +		if (kvm_x86_ops.pi_test_and_set_pir_on(vcpu, vector))
> > +			return;
> > +
> > +		kvm_x86_ops.sync_pir_to_irr(vcpu);
> > +	} else {
> > +		kvm_lapic_set_irr(vector, apic);
> > +		if (kvm_cpu_has_injectable_intr(vcpu)) {
> > +			if (kvm_x86_ops.interrupt_allowed(vcpu)) {
> > +				kvm_queue_interrupt(vcpu,
> > +					kvm_cpu_get_interrupt(vcpu), false);
> > +				kvm_x86_ops.set_irq(vcpu);
> > +			} else
> > +				kvm_x86_ops.enable_irq_window(vcpu);
> > +		}
> > +	}
> > +}
> > +
>
> Ok, got it now. The problem is that deliver_posted_interrupt goes through
>
> 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
> 		kvm_vcpu_kick(vcpu);
>
> Would it help to make the above
>
> 	if (vcpu != kvm_get_running_vcpu() &&
> 	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
> 		kvm_vcpu_kick(vcpu);
>
> ? If that is enough for the APICv case, it's good enough.

We will not exit from vmx_vcpu_run to vcpu_enter_guest, so it will not
help, right?

    Wanpeng
On 23/04/20 11:35, Wanpeng Li wrote:
>> Ok, got it now. The problem is that deliver_posted_interrupt goes through
>>
>> 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
>> 		kvm_vcpu_kick(vcpu);
>>
>> Would it help to make the above
>>
>> 	if (vcpu != kvm_get_running_vcpu() &&
>> 	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
>> 		kvm_vcpu_kick(vcpu);
>>
>> ? If that is enough for the APICv case, it's good enough.
> We will not exit from vmx_vcpu_run to vcpu_enter_guest, so it will not
> help, right?

Oh indeed---the call to sync_pir_to_irr is in vcpu_enter_guest. You can
add it to patch 3 right before "goto cont_run", since AMD does not need it.

Paolo
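A minimal sketch of what Paolo is suggesting here, placed in the continue-run fastpath loop of patch 3. The "cont_run" label and the surrounding loop structure are assumptions taken from this discussion; patch 3 itself is not shown in this thread:

	/*
	 * Sketch only: the fastpath re-enters the guest without going back
	 * through vcpu_enter_guest(), where sync_pir_to_irr() is normally
	 * called, so fold pending posted interrupts from the PIR into the
	 * IRR here before re-entry. Per the discussion above this would be
	 * VMX-only placement, since AMD does not need it.
	 */
	kvm_x86_ops.sync_pir_to_irr(vcpu);
	goto cont_run;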
On Thu, 23 Apr 2020 at 17:41, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
> On 23/04/20 11:35, Wanpeng Li wrote:
> >> Ok, got it now. The problem is that deliver_posted_interrupt goes through
> >>
> >> 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
> >> 		kvm_vcpu_kick(vcpu);
> >>
> >> Would it help to make the above
> >>
> >> 	if (vcpu != kvm_get_running_vcpu() &&
> >> 	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
> >> 		kvm_vcpu_kick(vcpu);
> >>
> >> ? If that is enough for the APICv case, it's good enough.
> > We will not exit from vmx_vcpu_run to vcpu_enter_guest, so it will not
> > help, right?
>
> Oh indeed---the call to sync_pir_to_irr is in vcpu_enter_guest. You can
> add it to patch 3 right before "goto cont_run", since AMD does not need it.

Just move kvm_x86_ops.sync_pir_to_irr(vcpu)? How about the set pir/on
part for APICv and non-APICv part in fast_deliver_interrupt()?

    Wanpeng
On 23/04/20 11:44, Wanpeng Li wrote:
>>>> Would it help to make the above
>>>>
>>>> 	if (vcpu != kvm_get_running_vcpu() &&
>>>> 	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
>>>> 		kvm_vcpu_kick(vcpu);
>>>>
>>>> ? If that is enough for the APICv case, it's good enough.
>>>
>>> We will not exit from vmx_vcpu_run to vcpu_enter_guest, so it will not
>>> help, right?
>>
>> Oh indeed---the call to sync_pir_to_irr is in vcpu_enter_guest. You can
>> add it to patch 3 right before "goto cont_run", since AMD does not need it.
>
> Just move kvm_x86_ops.sync_pir_to_irr(vcpu)? How about the set pir/on
> part for APICv and non-APICv part in fast_deliver_interrupt()?

That should be handled by deliver_posted_interrupt with no performance
penalty, if you add "vcpu != kvm_get_running_vcpu()" before it calls
kvm_vcpu_trigger_posted_interrupt.

Paolo
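Applied to vmx_deliver_posted_interrupt(), that suggestion would look roughly like the fragment below. This is a sketch of the proposed change, not code from the posted patch; kvm_get_running_vcpu() is the existing helper that returns the vCPU currently loaded on this physical CPU:

	if (vmx_pi_test_and_set_pir_on(vcpu, vector))
		return 0;

	/*
	 * Sketch: if the target vCPU is the one running on this CPU (e.g.
	 * the timer fastpath delivering an interrupt to itself), skip the
	 * notification IPI/kick; the PIR/ON bits set above are folded into
	 * the IRR by sync_pir_to_irr() before the next VM entry.
	 */
	if (vcpu != kvm_get_running_vcpu() &&
	    !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
		kvm_vcpu_kick(vcpu);

	return 0;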
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f26df2c..f809763 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1157,6 +1157,7 @@ struct kvm_x86_ops {
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
 	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+	bool (*pi_test_and_set_pir_on)(struct kvm_vcpu *vcpu, int vector);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
 	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 38f7dc9..7703142 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1259,6 +1259,30 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
 }
 
+static void fast_deliver_interrupt(struct kvm_lapic *apic, int vector)
+{
+	struct kvm_vcpu *vcpu = apic->vcpu;
+
+	kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR);
+
+	if (vcpu->arch.apicv_active) {
+		if (kvm_x86_ops.pi_test_and_set_pir_on(vcpu, vector))
+			return;
+
+		kvm_x86_ops.sync_pir_to_irr(vcpu);
+	} else {
+		kvm_lapic_set_irr(vector, apic);
+		if (kvm_cpu_has_injectable_intr(vcpu)) {
+			if (kvm_x86_ops.interrupt_allowed(vcpu)) {
+				kvm_queue_interrupt(vcpu,
+					kvm_cpu_get_interrupt(vcpu), false);
+				kvm_x86_ops.set_irq(vcpu);
+			} else
+				kvm_x86_ops.enable_irq_window(vcpu);
+		}
+	}
+}
+
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
 {
 	ktime_t remaining, now;
@@ -2351,6 +2375,14 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 	return 0;
 }
 
+static void kvm_apic_local_deliver_fast(struct kvm_lapic *apic, int lvt_type)
+{
+	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
+
+	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED))
+		fast_deliver_interrupt(apic, reg & APIC_VECTOR_MASK);
+}
+
 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index e80daa9..ab9e0fd 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -905,6 +905,11 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 	return ret;
 }
 
+bool svm_pi_test_and_set_pir_on(struct kvm_vcpu *vcpu, int vector)
+{
+	return false;
+}
+
 bool svm_check_apicv_inhibit_reasons(ulong bit)
 {
 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index eb95283..fd0cab3 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4035,6 +4035,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
+	.pi_test_and_set_pir_on = svm_pi_test_and_set_pir_on,
 	.setup_mce = svm_setup_mce,
 
 	.smi_allowed = svm_smi_allowed,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ca95204..8a62a8b 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -457,6 +457,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 		       uint32_t guest_irq, bool set);
 void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
 void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
+bool svm_pi_test_and_set_pir_on(struct kvm_vcpu *vcpu, int vector);
 
 /* sev.c */
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 766303b..fd20cb3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3883,6 +3883,21 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 	}
 	return -1;
 }
+
+static bool vmx_pi_test_and_set_pir_on(struct kvm_vcpu *vcpu, int vector)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+		return true;
+
+	/* If a previous notification has sent the IPI, nothing to do. */
+	if (pi_test_and_set_on(&vmx->pi_desc))
+		return true;
+
+	return false;
+}
+
 /*
  * Send interrupt to vcpu via posted interrupt way.
  * 1. If target vcpu is running(non-root mode), send posted interrupt
@@ -3892,7 +3907,6 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
  */
 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int r;
 
 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
@@ -3902,11 +3916,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 	if (!vcpu->arch.apicv_active)
 		return -1;
 
-	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
-		return 0;
-
-	/* If a previous notification has sent the IPI, nothing to do. */
-	if (pi_test_and_set_on(&vmx->pi_desc))
+	if (vmx_pi_test_and_set_pir_on(vcpu, vector))
 		return 0;
 
 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
@@ -7826,6 +7836,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.hwapic_isr_update = vmx_hwapic_isr_update,
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
+	.pi_test_and_set_pir_on = vmx_pi_test_and_set_pir_on,
 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
 	.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
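Note that none of the hunks above actually call kvm_apic_local_deliver_fast(); presumably another patch in the series invokes it from the TSC-deadline timer expiry fastpath, roughly along these lines (hypothetical call site, not from the posted series):

	/*
	 * Hypothetical caller: deliver the expired APIC timer (LVTT) vector
	 * through the fastpath helper instead of the regular
	 * kvm_apic_local_deliver() -> kvm_irq_delivery_to_apic() route.
	 */
	kvm_apic_local_deliver_fast(apic, APIC_LVTT);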