Message ID | 1587709364-19090-2-git-send-email-wanpengli@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: VMX: Tscdeadline timer emulation fastpath |
On Fri, Apr 24, 2020 at 02:22:40PM +0800, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
>
> Introduce generic fastpath handler to handle MSR fastpath, VMX-preemption
> timer fastpath etc. In addition, we can't observe benefit from single
> target IPI fastpath when APICv is disabled, so let's just enable IPI and
> Timer fastpath when APICv is enabled for now.

There are three different changes being squished into a single patch:

 - Refactor code to add helper
 - Change !APICv behavior for WRMSR fastpath
 - Introduce EXIT_FASTPATH_CONT_RUN

I don't think you necessarily need to break this into three separate
patches, but the !APICv change needs to be a standalone patch, especially
given the shortlog. E.g. the refactoring could be introduced along with
the second fastpath case, and CONT_RUN could be introduced with its first
usage.

>
> Tested-by: Haiwei Li <lihaiwei@tencent.com>
> Cc: Haiwei Li <lihaiwei@tencent.com>
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 1 +
>  arch/x86/kvm/vmx/vmx.c          | 25 ++++++++++++++++++++-----
>  2 files changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f26df2c..6397723 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -188,6 +188,7 @@ enum {
>  enum exit_fastpath_completion {
>  	EXIT_FASTPATH_NONE,
>  	EXIT_FASTPATH_SKIP_EMUL_INS,
> +	EXIT_FASTPATH_CONT_RUN,
>  };
>
>  struct x86_emulate_ctxt;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 766303b..f1f6638 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6559,6 +6559,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
>  	}
>  }
>
> +static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
> +{
> +	if (!is_guest_mode(vcpu) && vcpu->arch.apicv_active) {
> +		switch (to_vmx(vcpu)->exit_reason) {
> +		case EXIT_REASON_MSR_WRITE:
> +			return handle_fastpath_set_msr_irqoff(vcpu);
> +		default:
> +			return EXIT_FASTPATH_NONE;
> +		}
> +	}
> +
> +	return EXIT_FASTPATH_NONE;
> +}
> +
>  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
>
>  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> @@ -6567,6 +6581,7 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	unsigned long cr3, cr4;
>
> +cont_run:
>  	/* Record the guest's net vcpu time for enforced NMI injections. */
>  	if (unlikely(!enable_vnmi &&
>  		     vmx->loaded_vmcs->soft_vnmi_blocked))
> @@ -6733,17 +6748,17 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
>  		return EXIT_FASTPATH_NONE;
>
> -	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
> -		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
> -	else
> -		exit_fastpath = EXIT_FASTPATH_NONE;
> -
>  	vmx->loaded_vmcs->launched = 1;
>  	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
>
>  	vmx_recover_nmi_blocking(vmx);
>  	vmx_complete_interrupts(vmx);
>
> +	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> +	/* static call is better with retpolines */
> +	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> +		goto cont_run;
> +
>  	return exit_fastpath;
>  }
>
> --
> 2.7.4
>
On Tue, 28 Apr 2020 at 02:26, Sean Christopherson <sean.j.christopherson@intel.com> wrote:
>
> On Fri, Apr 24, 2020 at 02:22:40PM +0800, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > Introduce generic fastpath handler to handle MSR fastpath, VMX-preemption
> > timer fastpath etc. In addition, we can't observe benefit from single
> > target IPI fastpath when APICv is disabled, so let's just enable IPI and
> > Timer fastpath when APICv is enabled for now.
>
> There are three different changes being squished into a single patch:
>
>  - Refactor code to add helper
>  - Change !APICv behavior for WRMSR fastpath
>  - Introduce EXIT_FASTPATH_CONT_RUN
>
> I don't think you necessarily need to break this into three separate
> patches, but the !APICv change needs to be a standalone patch, especially
> given the shortlog. E.g. the refactoring could be introduced along with
> the second fastpath case, and CONT_RUN could be introduced with its first
> usage.

Agreed, will split to two separate patches.

    Wanpeng
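For readers following the series, the split suggested above would presumably let the refactoring land together with the second fastpath case, i.e. the VMX-preemption timer named in the series title. A rough, illustrative sketch of what the extended helper could look like is below; it is not part of this patch, and handle_fastpath_preemption_timer() is a hypothetical name for a handler a later patch in the series would introduce.

/*
 * Illustrative sketch only, not part of this patch: how
 * vmx_exit_handlers_fastpath() could grow the second fastpath case once a
 * preemption-timer patch lands. handle_fastpath_preemption_timer() is a
 * hypothetical helper name here.
 */
static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	if (!is_guest_mode(vcpu) && vcpu->arch.apicv_active) {
		switch (to_vmx(vcpu)->exit_reason) {
		case EXIT_REASON_MSR_WRITE:
			return handle_fastpath_set_msr_irqoff(vcpu);
		case EXIT_REASON_PREEMPTION_TIMER:
			/* Would rearm the guest timer and ask to re-enter directly. */
			return handle_fastpath_preemption_timer(vcpu);
		default:
			return EXIT_FASTPATH_NONE;
		}
	}

	return EXIT_FASTPATH_NONE;
}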
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f26df2c..6397723 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -188,6 +188,7 @@ enum {
 enum exit_fastpath_completion {
 	EXIT_FASTPATH_NONE,
 	EXIT_FASTPATH_SKIP_EMUL_INS,
+	EXIT_FASTPATH_CONT_RUN,
 };
 
 struct x86_emulate_ctxt;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 766303b..f1f6638 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6559,6 +6559,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
+static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+	if (!is_guest_mode(vcpu) && vcpu->arch.apicv_active) {
+		switch (to_vmx(vcpu)->exit_reason) {
+		case EXIT_REASON_MSR_WRITE:
+			return handle_fastpath_set_msr_irqoff(vcpu);
+		default:
+			return EXIT_FASTPATH_NONE;
+		}
+	}
+
+	return EXIT_FASTPATH_NONE;
+}
+
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -6567,6 +6581,7 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
+cont_run:
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6733,17 +6748,17 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
 		return EXIT_FASTPATH_NONE;
 
-	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
-		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
-	else
-		exit_fastpath = EXIT_FASTPATH_NONE;
-
 	vmx->loaded_vmcs->launched = 1;
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
+	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+	/* static call is better with retpolines */
+	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
+		goto cont_run;
+
 	return exit_fastpath;
 }
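To see the new EXIT_FASTPATH_CONT_RUN control flow in isolation, here is a minimal, self-contained sketch in plain C (not KVM code, assumptions mine): when a fastpath handler fully services an exit, the run function loops back and re-enters the guest directly, rather than returning to the outer loop through the retpoline-protected kvm_x86_ops->run indirect call, which is what the "static call is better with retpolines" comment in the patch alludes to.

/* Minimal stand-alone sketch of the CONT_RUN pattern; not KVM code. */
#include <stdio.h>

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_SKIP_EMUL_INS,
	EXIT_FASTPATH_CONT_RUN,
};

/* Pretend fastpath handler: fully services the first two exits. */
static enum exit_fastpath_completion fastpath_handle(int exit_count)
{
	return exit_count < 2 ? EXIT_FASTPATH_CONT_RUN : EXIT_FASTPATH_NONE;
}

/* Analogue of vmx_vcpu_run(): loops internally while CONT_RUN is returned. */
static enum exit_fastpath_completion vcpu_run(void)
{
	enum exit_fastpath_completion ret;
	int exits = 0;

	do {
		printf("enter guest (exit #%d)\n", exits);
		ret = fastpath_handle(exits++);
	} while (ret == EXIT_FASTPATH_CONT_RUN);

	/* Anything not fully handled falls back to the slow exit path. */
	return ret;
}

int main(void)
{
	return vcpu_run() == EXIT_FASTPATH_NONE ? 0 : 1;
}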