Message ID | 1588055009-12677-2-git-send-email-wanpengli@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: VMX: Tscdeadline timer emulation fastpath | expand |
Wanpeng Li <kernellwp@gmail.com> writes: > From: Wanpeng Li <wanpengli@tencent.com> > > Introduce generic fastpath handler to handle MSR fastpath, VMX-preemption > timer fastpath etc, move it after vmx_complete_interrupts() in order that > later patch can catch the case vmexit occurred while another event was > being delivered to guest. There is no observed performance difference for > IPI fastpath testing after this move. > > Tested-by: Haiwei Li <lihaiwei@tencent.com> > Cc: Haiwei Li <lihaiwei@tencent.com> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com> > --- > arch/x86/kvm/vmx/vmx.c | 21 ++++++++++++++++----- > 1 file changed, 16 insertions(+), 5 deletions(-) > > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index 3ab6ca6..9b5adb4 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -6583,6 +6583,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) > } > } > > +static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) > +{ > + if (!is_guest_mode(vcpu)) { Nitpick: do we actually expect to have any fastpath handlers anytime soon? 
If not, we could've written this as if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; and save on indentation) > + switch (to_vmx(vcpu)->exit_reason) { > + case EXIT_REASON_MSR_WRITE: > + return handle_fastpath_set_msr_irqoff(vcpu); > + default: > + return EXIT_FASTPATH_NONE; > + } > + } > + > + return EXIT_FASTPATH_NONE; > +} > + > bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); > > static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > @@ -6757,17 +6771,14 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) > return EXIT_FASTPATH_NONE; > > - if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE) > - exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); > - else > - exit_fastpath = EXIT_FASTPATH_NONE; > - > vmx->loaded_vmcs->launched = 1; > vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); > > vmx_recover_nmi_blocking(vmx); > vmx_complete_interrupts(vmx); > > + exit_fastpath = vmx_exit_handlers_fastpath(vcpu); > + > return exit_fastpath; > } Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
On Thu, Apr 30, 2020 at 03:28:46PM +0200, Vitaly Kuznetsov wrote: > Wanpeng Li <kernellwp@gmail.com> writes: > > > From: Wanpeng Li <wanpengli@tencent.com> > > > > Introduce generic fastpath handler to handle MSR fastpath, VMX-preemption > > timer fastpath etc, move it after vmx_complete_interrupts() in order that > > later patch can catch the case vmexit occurred while another event was > > being delivered to guest. There is no obversed performance difference for > > IPI fastpath testing after this move. > > > > Tested-by: Haiwei Li <lihaiwei@tencent.com> > > Cc: Haiwei Li <lihaiwei@tencent.com> > > Signed-off-by: Wanpeng Li <wanpengli@tencent.com> > > --- > > arch/x86/kvm/vmx/vmx.c | 21 ++++++++++++++++----- > > 1 file changed, 16 insertions(+), 5 deletions(-) > > > > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > > index 3ab6ca6..9b5adb4 100644 > > --- a/arch/x86/kvm/vmx/vmx.c > > +++ b/arch/x86/kvm/vmx/vmx.c > > @@ -6583,6 +6583,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) > > } > > } > > > > +static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) > > +{ > > + if (!is_guest_mode(vcpu)) { > > Nitpick: do we actually expect to have any fastpath handlers anytime > soon? If not, we could've written this as > > if (is_guest_mode(vcpu)) > return EXIT_FASTPATH_NONE; > > and save on identation) Agreed. An alternative approach would be to do the check in the caller, e.g. if (is_guest_mode(vcpu)) return EXIT_FASTPATH_NONE; return vmx_exit_handlers_fastpath(vcpu); I don't have a strong preference either way. 
> > + switch (to_vmx(vcpu)->exit_reason) { > > + case EXIT_REASON_MSR_WRITE: > > + return handle_fastpath_set_msr_irqoff(vcpu); > > + default: > > + return EXIT_FASTPATH_NONE; > > + } > > + } > > + > > + return EXIT_FASTPATH_NONE; > > +} > > + > > bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); > > > > static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > > @@ -6757,17 +6771,14 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > > if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) > > return EXIT_FASTPATH_NONE; > > > > - if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE) > > - exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); > > - else > > - exit_fastpath = EXIT_FASTPATH_NONE; > > - > > vmx->loaded_vmcs->launched = 1; > > vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); > > > > vmx_recover_nmi_blocking(vmx); > > vmx_complete_interrupts(vmx); > > > > + exit_fastpath = vmx_exit_handlers_fastpath(vcpu); No need for capturing the result in a local variable, just return the function call. > > + > > return exit_fastpath; > > } > > Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com> > > -- > Vitaly >
On Fri, 1 May 2020 at 22:12, Sean Christopherson <sean.j.christopherson@intel.com> wrote: > > On Thu, Apr 30, 2020 at 03:28:46PM +0200, Vitaly Kuznetsov wrote: > > Wanpeng Li <kernellwp@gmail.com> writes: > > > > > From: Wanpeng Li <wanpengli@tencent.com> > > > > > > Introduce generic fastpath handler to handle MSR fastpath, VMX-preemption > > > timer fastpath etc, move it after vmx_complete_interrupts() in order that > > > later patch can catch the case vmexit occurred while another event was > > > being delivered to guest. There is no obversed performance difference for > > > IPI fastpath testing after this move. > > > > > > Tested-by: Haiwei Li <lihaiwei@tencent.com> > > > Cc: Haiwei Li <lihaiwei@tencent.com> > > > Signed-off-by: Wanpeng Li <wanpengli@tencent.com> > > > --- > > > arch/x86/kvm/vmx/vmx.c | 21 ++++++++++++++++----- > > > 1 file changed, 16 insertions(+), 5 deletions(-) > > > > > > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > > > index 3ab6ca6..9b5adb4 100644 > > > --- a/arch/x86/kvm/vmx/vmx.c > > > +++ b/arch/x86/kvm/vmx/vmx.c > > > @@ -6583,6 +6583,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) > > > } > > > } > > > > > > +static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) > > > +{ > > > + if (!is_guest_mode(vcpu)) { > > > > Nitpick: do we actually expect to have any fastpath handlers anytime > > soon? If not, we could've written this as > > > > if (is_guest_mode(vcpu)) > > return EXIT_FASTPATH_NONE; > > > > and save on identation) > > Agreed. An alternative approach would be to do the check in the caller, e.g. > > if (is_guest_mode(vcpu)) > return EXIT_FASTPATH_NONE; > > return vmx_exit_handlers_fastpath(vcpu); > > I don't have a strong preference either way. 
> > > > + switch (to_vmx(vcpu)->exit_reason) { > > > + case EXIT_REASON_MSR_WRITE: > > > + return handle_fastpath_set_msr_irqoff(vcpu); > > > + default: > > > + return EXIT_FASTPATH_NONE; > > > + } > > > + } > > > + > > > + return EXIT_FASTPATH_NONE; > > > +} > > > + > > > bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); > > > > > > static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > > > @@ -6757,17 +6771,14 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) > > > if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) > > > return EXIT_FASTPATH_NONE; > > > > > > - if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE) > > > - exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); > > > - else > > > - exit_fastpath = EXIT_FASTPATH_NONE; > > > - > > > vmx->loaded_vmcs->launched = 1; > > > vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); > > > > > > vmx_recover_nmi_blocking(vmx); > > > vmx_complete_interrupts(vmx); > > > > > > + exit_fastpath = vmx_exit_handlers_fastpath(vcpu); > > No need for capturing the result in a local variable, just return the function call. As you know later patches need to handle a local variable even though we can make 1/7 nicer, it is just overridden. Wanpeng
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3ab6ca6..9b5adb4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6583,6 +6583,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) } } +static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) +{ + if (!is_guest_mode(vcpu)) { + switch (to_vmx(vcpu)->exit_reason) { + case EXIT_REASON_MSR_WRITE: + return handle_fastpath_set_msr_irqoff(vcpu); + default: + return EXIT_FASTPATH_NONE; + } + } + + return EXIT_FASTPATH_NONE; +} + bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -6757,17 +6771,14 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) return EXIT_FASTPATH_NONE; - if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE) - exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); - else - exit_fastpath = EXIT_FASTPATH_NONE; - vmx->loaded_vmcs->launched = 1; vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); + exit_fastpath = vmx_exit_handlers_fastpath(vcpu); + return exit_fastpath; }