[v3,2/5] KVM: X86: Introduce need_cancel_enter_guest helper

Message ID 1587709364-19090-3-git-send-email-wanpengli@tencent.com (mailing list archive)
State New, archived
Series KVM: VMX: Tscdeadline timer emulation fastpath

Commit Message

Wanpeng Li April 24, 2020, 6:22 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

Introduce the need_cancel_enter_guest() helper: we need to check a few
conditions before doing CONT_RUN. In addition, the helper also catches
the case where a vmexit occurred while another event was being delivered
to guest software, since vmx_complete_interrupts() sets the request bit.

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
 arch/x86/kvm/x86.c     | 10 ++++++++--
 arch/x86/kvm/x86.h     |  1 +
 3 files changed, 16 insertions(+), 7 deletions(-)
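
For context, kvm_need_cancel_enter_guest() simply factors out the checks
that vcpu_enter_guest() already performs before entering the guest; as the
full diff at the bottom of this page shows, it is:

	bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu)
	{
		return (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
		    || need_resched() || signal_pending(current));
	}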

Comments

Wanpeng Li April 26, 2020, 2:05 a.m. UTC | #1
On Fri, 24 Apr 2020 at 14:23, Wanpeng Li <kernellwp@gmail.com> wrote:
>
> From: Wanpeng Li <wanpengli@tencent.com>
>
> Introduce need_cancel_enter_guest() helper, we need to check some
> conditions before doing CONT_RUN, in addition, it can also catch
> the case vmexit occurred while another event was being delivered
> to guest software since vmx_complete_interrupts() adds the request
> bit.
>
> Tested-by: Haiwei Li <lihaiwei@tencent.com>
> Cc: Haiwei Li <lihaiwei@tencent.com>
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
>  arch/x86/kvm/x86.c     | 10 ++++++++--
>  arch/x86/kvm/x86.h     |  1 +
>  3 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index f1f6638..5c21027 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
>
>  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  {
> -       enum exit_fastpath_completion exit_fastpath;
> +       enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
>         struct vcpu_vmx *vmx = to_vmx(vcpu);
>         unsigned long cr3, cr4;
>
> @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>         vmx_recover_nmi_blocking(vmx);
>         vmx_complete_interrupts(vmx);
>
> -       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> -       /* static call is better with retpolines */
> -       if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> -               goto cont_run;
> +       if (!kvm_need_cancel_enter_guest(vcpu)) {
> +               exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> +               /* static call is better with retpolines */
> +               if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> +                       goto cont_run;
> +       }

kvm_need_cancel_enter_guest() should not be checked before
vmx_exit_handlers_fastpath(), since that would break the IPI fastpath.
How about applying something like the diff below? Otherwise, we could
introduce another value, EXIT_FASTPATH_CONT_FAIL, to indicate a failure
due to kvm_need_cancel_enter_guest() when it is checked after
vmx_exit_handlers_fastpath(); vmx_handle_exit() would then return 1
directly instead of calling kvm_skip_emulated_instruction(). The
VMX-preemption timer exit does not need to skip the emulated instruction,
but the WRMSR TSCDEADLINE exit does, which makes this a little more
complex. (A rough sketch of the CONT_FAIL option follows the diff below.)

Paolo, what do you think?

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 853d3af..9317924 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6564,6 +6564,9 @@ static enum exit_fastpath_completion handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
 {
     struct vcpu_vmx *vmx = to_vmx(vcpu);

+    if (kvm_need_cancel_enter_guest(vcpu))
+        return EXIT_FASTPATH_NONE;
+
     if (!vmx->req_immediate_exit &&
         !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
             kvm_lapic_expired_hv_timer(vcpu);
@@ -6771,12 +6774,10 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
     vmx_recover_nmi_blocking(vmx);
     vmx_complete_interrupts(vmx);

-    if (!(kvm_need_cancel_enter_guest(vcpu))) {
-        exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-        if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
-            vmx_sync_pir_to_irr(vcpu);
-            goto cont_run;
-        }
+    exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+    if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
+        vmx_sync_pir_to_irr(vcpu);
+        goto cont_run;
     }

     return exit_fastpath;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 99061ba..11b309c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1618,6 +1618,9 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)

 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
 {
+    if (kvm_need_cancel_enter_guest(vcpu))
+        return 1;
+
     if (!kvm_x86_ops.set_hv_timer ||
         kvm_mwait_in_guest(vcpu->kvm) ||
         kvm_can_post_timer_interrupt(vcpu))
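
A rough sketch of the EXIT_FASTPATH_CONT_FAIL alternative mentioned above
(untested, and the enum value does not exist in this series yet):

	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN &&
	    kvm_need_cancel_enter_guest(vcpu))
		exit_fastpath = EXIT_FASTPATH_CONT_FAIL;

and then in vmx_handle_exit():

	if (exit_fastpath == EXIT_FASTPATH_CONT_FAIL)
		return 1;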
Sean Christopherson April 27, 2020, 6:30 p.m. UTC | #2
On Fri, Apr 24, 2020 at 02:22:41PM +0800, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
> 
> Introduce need_cancel_enter_guest() helper, we need to check some 
> conditions before doing CONT_RUN, in addition, it can also catch 
> the case vmexit occurred while another event was being delivered 
> to guest software since vmx_complete_interrupts() adds the request 
> bit.
> 
> Tested-by: Haiwei Li <lihaiwei@tencent.com>
> Cc: Haiwei Li <lihaiwei@tencent.com>
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
>  arch/x86/kvm/x86.c     | 10 ++++++++--
>  arch/x86/kvm/x86.h     |  1 +
>  3 files changed, 16 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index f1f6638..5c21027 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
>  
>  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  {
> -	enum exit_fastpath_completion exit_fastpath;
> +	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	unsigned long cr3, cr4;
>  
> @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	vmx_recover_nmi_blocking(vmx);
>  	vmx_complete_interrupts(vmx);
>  
> -	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> -	/* static call is better with retpolines */
> -	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> -		goto cont_run;
> +	if (!kvm_need_cancel_enter_guest(vcpu)) {
> +		exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> +		/* static call is better with retpolines */
> +		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> +			goto cont_run;
> +	}
>  
>  	return exit_fastpath;
>  }
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 59958ce..4561104 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1581,6 +1581,13 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
>  }
>  EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
>  
> +bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu)

What about kvm_vcpu_<???>_pending()?  Not sure what a good ??? would be.
The "cancel_enter_guest" wording is a bit confusing when this is called
from the VM-Exit path.

> +{
> +	return (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
> +	    || need_resched() || signal_pending(current));

Parentheses around the whole statement are unnecessary.  Personal preference
is to put the || before the newline.
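
E.g. something like:

	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
	       need_resched() || signal_pending(current);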

> +}
> +EXPORT_SYMBOL_GPL(kvm_need_cancel_enter_guest);
> +
>  /*
>   * The fast path for frequent and performance sensitive wrmsr emulation,
>   * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
> @@ -8373,8 +8380,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
>  		kvm_x86_ops.sync_pir_to_irr(vcpu);
>  
> -	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
> -	    || need_resched() || signal_pending(current)) {
> +	if (kvm_need_cancel_enter_guest(vcpu)) {
>  		vcpu->mode = OUTSIDE_GUEST_MODE;
>  		smp_wmb();
>  		local_irq_enable();
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 7b5ed8e..1906e7e 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -364,5 +364,6 @@ static inline bool kvm_dr7_valid(u64 data)
>  void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
>  void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
>  u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
> +bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu);
>  
>  #endif
> -- 
> 2.7.4
>
Sean Christopherson April 27, 2020, 6:36 p.m. UTC | #3
On Sun, Apr 26, 2020 at 10:05:00AM +0800, Wanpeng Li wrote:
> On Fri, 24 Apr 2020 at 14:23, Wanpeng Li <kernellwp@gmail.com> wrote:
> >
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > Introduce need_cancel_enter_guest() helper, we need to check some
> > conditions before doing CONT_RUN, in addition, it can also catch
> > the case vmexit occurred while another event was being delivered
> > to guest software since vmx_complete_interrupts() adds the request
> > bit.
> >
> > Tested-by: Haiwei Li <lihaiwei@tencent.com>
> > Cc: Haiwei Li <lihaiwei@tencent.com>
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > ---
> >  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
> >  arch/x86/kvm/x86.c     | 10 ++++++++--
> >  arch/x86/kvm/x86.h     |  1 +
> >  3 files changed, 16 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index f1f6638..5c21027 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
> >
> >  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >  {
> > -       enum exit_fastpath_completion exit_fastpath;
> > +       enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
> >         struct vcpu_vmx *vmx = to_vmx(vcpu);
> >         unsigned long cr3, cr4;
> >
> > @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >         vmx_recover_nmi_blocking(vmx);
> >         vmx_complete_interrupts(vmx);
> >
> > -       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > -       /* static call is better with retpolines */
> > -       if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > -               goto cont_run;
> > +       if (!kvm_need_cancel_enter_guest(vcpu)) {
> > +               exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > +               /* static call is better with retpolines */
> > +               if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > +                       goto cont_run;
> > +       }
> 
> The kvm_need_cancel_enter_guest() should not before
> vmx_exit_handlers_fastpath() which will break IPI fastpath. How about
> applying something like below, otherwise, maybe introduce another
> EXIT_FASTPATH_CONT_FAIL to indicate fails due to
> kvm_need_cancel_enter_guest() if checking it after
> vmx_exit_handlers_fastpath(), then we return 1 in vmx_handle_exit()
> directly instead of kvm_skip_emulated_instruction(). VMX-preemption
> timer exit doesn't need to skip emulated instruction but wrmsr
> TSCDEADLINE MSR exit does which results in a little complex here.
> 
> Paolo, what do you think?
> 
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 853d3af..9317924 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6564,6 +6564,9 @@ static enum exit_fastpath_completion
> handle_fastpath_preemption_timer(struct kvm
>  {
>      struct vcpu_vmx *vmx = to_vmx(vcpu);
> 
> +    if (kvm_need_cancel_enter_guest(vcpu))
> +        return EXIT_FASTPATH_NONE;
> +
>      if (!vmx->req_immediate_exit &&
>          !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
>              kvm_lapic_expired_hv_timer(vcpu);
> @@ -6771,12 +6774,10 @@ static enum exit_fastpath_completion
> vmx_vcpu_run(struct kvm_vcpu *vcpu)
>      vmx_recover_nmi_blocking(vmx);
>      vmx_complete_interrupts(vmx);
> 
> -    if (!(kvm_need_cancel_enter_guest(vcpu))) {
> -        exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> -        if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
> -            vmx_sync_pir_to_irr(vcpu);
> -            goto cont_run;
> -        }
> +    exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> +    if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {

Relying on the handlers to check kvm_need_cancel_enter_guest() will be
error prone and costly to maintain.  I also don't like that it buries the
logic.

What about adding another flavor, e.g.:

	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN &&
	    kvm_need_cancel_enter_guest(vcpu))
		exit_fastpath = EXIT_FASTPATH_NOP;

That would also allow you to enable preemption timer without first having
to add CONT_RUN, which would be a very good thing for bisection.

> +        vmx_sync_pir_to_irr(vcpu);
> +        goto cont_run;
>      }
> 
>      return exit_fastpath;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 99061ba..11b309c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1618,6 +1618,9 @@ static int
> handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
> 
>  static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
>  {
> +    if (kvm_need_cancel_enter_guest(vcpu))
> +        return 1;
> +
>      if (!kvm_x86_ops.set_hv_timer ||
>          kvm_mwait_in_guest(vcpu->kvm) ||
>          kvm_can_post_timer_interrupt(vcpu))
Wanpeng Li April 28, 2020, 12:44 a.m. UTC | #4
On Tue, 28 Apr 2020 at 02:36, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Sun, Apr 26, 2020 at 10:05:00AM +0800, Wanpeng Li wrote:
> > On Fri, 24 Apr 2020 at 14:23, Wanpeng Li <kernellwp@gmail.com> wrote:
> > >
> > > From: Wanpeng Li <wanpengli@tencent.com>
> > >
> > > Introduce need_cancel_enter_guest() helper, we need to check some
> > > conditions before doing CONT_RUN, in addition, it can also catch
> > > the case vmexit occurred while another event was being delivered
> > > to guest software since vmx_complete_interrupts() adds the request
> > > bit.
> > >
> > > Tested-by: Haiwei Li <lihaiwei@tencent.com>
> > > Cc: Haiwei Li <lihaiwei@tencent.com>
> > > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > > ---
> > >  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
> > >  arch/x86/kvm/x86.c     | 10 ++++++++--
> > >  arch/x86/kvm/x86.h     |  1 +
> > >  3 files changed, 16 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > > index f1f6638..5c21027 100644
> > > --- a/arch/x86/kvm/vmx/vmx.c
> > > +++ b/arch/x86/kvm/vmx/vmx.c
> > > @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
> > >
> > >  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> > >  {
> > > -       enum exit_fastpath_completion exit_fastpath;
> > > +       enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
> > >         struct vcpu_vmx *vmx = to_vmx(vcpu);
> > >         unsigned long cr3, cr4;
> > >
> > > @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> > >         vmx_recover_nmi_blocking(vmx);
> > >         vmx_complete_interrupts(vmx);
> > >
> > > -       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > > -       /* static call is better with retpolines */
> > > -       if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > > -               goto cont_run;
> > > +       if (!kvm_need_cancel_enter_guest(vcpu)) {
> > > +               exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > > +               /* static call is better with retpolines */
> > > +               if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > > +                       goto cont_run;
> > > +       }
> >
> > The kvm_need_cancel_enter_guest() should not before
> > vmx_exit_handlers_fastpath() which will break IPI fastpath. How about
> > applying something like below, otherwise, maybe introduce another
> > EXIT_FASTPATH_CONT_FAIL to indicate fails due to
> > kvm_need_cancel_enter_guest() if checking it after
> > vmx_exit_handlers_fastpath(), then we return 1 in vmx_handle_exit()
> > directly instead of kvm_skip_emulated_instruction(). VMX-preemption
> > timer exit doesn't need to skip emulated instruction but wrmsr
> > TSCDEADLINE MSR exit does which results in a little complex here.
> >
> > Paolo, what do you think?
> >
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index 853d3af..9317924 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -6564,6 +6564,9 @@ static enum exit_fastpath_completion
> > handle_fastpath_preemption_timer(struct kvm
> >  {
> >      struct vcpu_vmx *vmx = to_vmx(vcpu);
> >
> > +    if (kvm_need_cancel_enter_guest(vcpu))
> > +        return EXIT_FASTPATH_NONE;
> > +
> >      if (!vmx->req_immediate_exit &&
> >          !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
> >              kvm_lapic_expired_hv_timer(vcpu);
> > @@ -6771,12 +6774,10 @@ static enum exit_fastpath_completion
> > vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >      vmx_recover_nmi_blocking(vmx);
> >      vmx_complete_interrupts(vmx);
> >
> > -    if (!(kvm_need_cancel_enter_guest(vcpu))) {
> > -        exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > -        if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
> > -            vmx_sync_pir_to_irr(vcpu);
> > -            goto cont_run;
> > -        }
> > +    exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > +    if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
>
> Relying on the handlers to check kvm_need_cancel_enter_guest() will be
> error prone and costly to maintain.  I also don't like that it buries the
> logic.
>
> What about adding another flavor, e.g.:
>
>         exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
>         if (exit_fastpath == EXIT_FASTPATH_CONT_RUN &&
>             kvm_need_cancel_enter_guest(vcpu))
>                 exit_fastpath = EXIT_FASTPATH_NOP;
>
> That would also allow you to enable preemption timer without first having
> to add CONT_RUN, which would be a very good thing for bisection.

I'm not sure I understand the second part; do you mean we don't need to
add CONT_RUN in patch 1/5?

    Wanpeng
Sean Christopherson April 28, 2020, 1:03 a.m. UTC | #5
On Tue, Apr 28, 2020 at 08:44:13AM +0800, Wanpeng Li wrote:
> On Tue, 28 Apr 2020 at 02:36, Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> >
> > > @@ -6771,12 +6774,10 @@ static enum exit_fastpath_completion
> > > vmx_vcpu_run(struct kvm_vcpu *vcpu)
> > >      vmx_recover_nmi_blocking(vmx);
> > >      vmx_complete_interrupts(vmx);
> > >
> > > -    if (!(kvm_need_cancel_enter_guest(vcpu))) {
> > > -        exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > > -        if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
> > > -            vmx_sync_pir_to_irr(vcpu);
> > > -            goto cont_run;
> > > -        }
> > > +    exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > > +    if (exit_fastpath == EXIT_FASTPATH_CONT_RUN) {
> >
> > Relying on the handlers to check kvm_need_cancel_enter_guest() will be
> > error prone and costly to maintain.  I also don't like that it buries the
> > logic.
> >
> > What about adding another flavor, e.g.:
> >
> >         exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> >         if (exit_fastpath == EXIT_FASTPATH_CONT_RUN &&
> >             kvm_need_cancel_enter_guest(vcpu))
> >                 exit_fastpath = EXIT_FASTPATH_NOP;
> >
> > That would also allow you to enable preemption timer without first having
> > to add CONT_RUN, which would be a very good thing for bisection.
> 
> I miss understand the second part, do you mean don't need to add
> CONT_RUN in patch 1/5?

Yes, with the disclaimer that I haven't worked through all the flows to
ensure it's actually doable and/or a good idea. 

The idea is to add EXIT_FASTPATH_NOP and use that for the preemption timer
fastpath.  KVM would still go through its full run loop, but it would skip
invoking the exit handler.  In theory that would disassociate fast handling
of the preemption timer from resuming the guest without going back to the
run loop, i.e. provide a separate bisection point for enabling CONT_RUN.
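
Roughly (untested, just to illustrate; EXIT_FASTPATH_NOP would be a new value):

	/* in vmx_handle_exit(), before the normal exit handler dispatch */
	if (exit_fastpath == EXIT_FASTPATH_NOP)
		return 1;	/* already handled in the fastpath, just re-enter the guest */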

Like I said, might not be a good idea, e.g. if preemption timer ends up
being the only user of EXIT_FASTPATH_CONT_RUN then EXIT_FASTPATH_NOP is a
waste of space.

Side topic, what about EXIT_FASTPATH_RESUME instead of CONT_RUN?  Or maybe
REENTER_GUEST?  Something that starts with RE :-)
Wanpeng Li April 28, 2020, 7:17 a.m. UTC | #6
On Tue, 28 Apr 2020 at 02:30, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Fri, Apr 24, 2020 at 02:22:41PM +0800, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > Introduce need_cancel_enter_guest() helper, we need to check some
> > conditions before doing CONT_RUN, in addition, it can also catch
> > the case vmexit occurred while another event was being delivered
> > to guest software since vmx_complete_interrupts() adds the request
> > bit.
> >
> > Tested-by: Haiwei Li <lihaiwei@tencent.com>
> > Cc: Haiwei Li <lihaiwei@tencent.com>
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > ---
> >  arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
> >  arch/x86/kvm/x86.c     | 10 ++++++++--
> >  arch/x86/kvm/x86.h     |  1 +
> >  3 files changed, 16 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> > index f1f6638..5c21027 100644
> > --- a/arch/x86/kvm/vmx/vmx.c
> > +++ b/arch/x86/kvm/vmx/vmx.c
> > @@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
> >
> >  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >  {
> > -     enum exit_fastpath_completion exit_fastpath;
> > +     enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
> >       struct vcpu_vmx *vmx = to_vmx(vcpu);
> >       unsigned long cr3, cr4;
> >
> > @@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
> >       vmx_recover_nmi_blocking(vmx);
> >       vmx_complete_interrupts(vmx);
> >
> > -     exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > -     /* static call is better with retpolines */
> > -     if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > -             goto cont_run;
> > +     if (!kvm_need_cancel_enter_guest(vcpu)) {
> > +             exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> > +             /* static call is better with retpolines */
> > +             if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
> > +                     goto cont_run;
> > +     }
> >
> >       return exit_fastpath;
> >  }
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 59958ce..4561104 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -1581,6 +1581,13 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
> >  }
> >  EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
> >
> > +bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu)
>
> What about kvm_vcpu_<???>_pending()?  Not sure what a good ??? would be.
> The "cancel_enter_guest" wording is a bit confusing when this is called
> from the VM-Exit path.
>
> > +{
> > +     return (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
> > +         || need_resched() || signal_pending(current));
>
> Parantheses around the whole statement are unnecessary.  Personal preference
> is to put the || before the newline.

I will handle these comments in v4.

    Wanpeng

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f1f6638..5c21027 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6577,7 +6577,7 @@  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	enum exit_fastpath_completion exit_fastpath;
+	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
@@ -6754,10 +6754,12 @@  static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
-	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-	/* static call is better with retpolines */
-	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
-		goto cont_run;
+	if (!kvm_need_cancel_enter_guest(vcpu)) {
+		exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+		/* static call is better with retpolines */
+		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
+			goto cont_run;
+	}
 
 	return exit_fastpath;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59958ce..4561104 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1581,6 +1581,13 @@  int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
+bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
+	    || need_resched() || signal_pending(current));
+}
+EXPORT_SYMBOL_GPL(kvm_need_cancel_enter_guest);
+
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
  * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
@@ -8373,8 +8380,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
 		kvm_x86_ops.sync_pir_to_irr(vcpu);
 
-	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
-	    || need_resched() || signal_pending(current)) {
+	if (kvm_need_cancel_enter_guest(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7b5ed8e..1906e7e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -364,5 +364,6 @@  static inline bool kvm_dr7_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu);
 
 #endif