[v2,3/3] KVM: SVM: Fix disable pause loop exit/pause filtering capability on SVM

Message ID 1595929506-9203-3-git-send-email-wanpengli@tencent.com (mailing list archive)
State New, archived
Series [v2,1/3] KVM: LAPIC: Prevent setting the tscdeadline timer if the lapic is hw disabled

Commit Message

Wanpeng Li July 28, 2020, 9:45 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

Commit 8566ac8b ("KVM: SVM: Implement pause loop exit logic in SVM") dropped
the ability to disable the pause loop exit/pause filtering capability
completely; I suspect this was a merge mistake by Radim, since the disable
vmexits capabilities and SVM pause loop exit patch sets were merged at the
same time. This patch reintroduces support for disabling pause loop
exit/pause filtering.

We observe a 2.9% hackbench improvement for a 92-vCPU guest on an AMD Rome
server.

Reported-by: Haiwei Li <lihaiwei@tencent.com>
Tested-by: Haiwei Li <lihaiwei@tencent.com>
Fixes: 8566ac8b ("KVM: SVM: Implement pause loop exit logic in SVM")
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/svm/svm.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
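
For context, the per-VM pause_in_guest flag that this patch honors is set
when userspace opts out of PAUSE exits via KVM_CAP_X86_DISABLE_EXITS. A
minimal userspace sketch follows (error handling omitted; the helper name
is illustrative, not part of the patch):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/*
	 * Ask KVM to let the guest execute PAUSE without a VM exit, i.e.
	 * set kvm->arch.pause_in_guest for this VM.  Issue this on the VM
	 * fd before any vCPU is created.
	 */
	static int disable_pause_exits(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_X86_DISABLE_EXITS,
			.args[0] = KVM_X86_DISABLE_EXITS_PAUSE,
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}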

Comments

Vitaly Kuznetsov July 29, 2020, 12:20 p.m. UTC | #1
Wanpeng Li <kernellwp@gmail.com> writes:

> From: Wanpeng Li <wanpengli@tencent.com>
>
> Commit 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM) drops
> disable pause loop exit/pause filtering capability completely, I guess it
> is a merge fault by Radim since disable vmexits capabilities and pause
> loop exit for SVM patchsets are merged at the same time. This patch
> reintroduces the disable pause loop exit/pause filtering capability
> support.
>
> We can observe 2.9% hackbench improvement for a 92 vCPUs guest on AMD 
> Rome Server.
>
> Reported-by: Haiwei Li <lihaiwei@tencent.com>
> Tested-by: Haiwei Li <lihaiwei@tencent.com>
> Fixes: 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM)
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/kvm/svm/svm.c | 9 ++++++---
>  1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index c0da4dd..c20f127 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
>  	svm->nested.vmcb = 0;
>  	svm->vcpu.arch.hflags = 0;
>  
> -	if (pause_filter_count) {
> +	if (pause_filter_count && !kvm_pause_in_guest(svm->vcpu.kvm)) {
>  		control->pause_filter_count = pause_filter_count;
>  		if (pause_filter_thresh)
>  			control->pause_filter_thresh = pause_filter_thresh;
> @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
>  	struct kvm_vcpu *vcpu = &svm->vcpu;
>  	bool in_kernel = (svm_get_cpl(vcpu) == 0);
>  
> -	if (pause_filter_thresh)
> +	if (!kvm_pause_in_guest(vcpu->kvm))
>  		grow_ple_window(vcpu);
>  
>  	kvm_vcpu_on_spin(vcpu, in_kernel);
> @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
>  
>  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
>  {
> -	if (pause_filter_thresh)
> +	if (!kvm_pause_in_guest(vcpu->kvm))
>  		shrink_ple_window(vcpu);
>  }
>  
> @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
>  
>  static int svm_vm_init(struct kvm *kvm)
>  {
> +	if (!pause_filter_thresh)
> +		kvm->arch.pause_in_guest = true;

Would it make sense to do

        if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

here and simplify the condition in init_vmcb()?

> +
>  	if (avic) {
>  		int ret = avic_vm_init(kvm);
>  		if (ret)
Wanpeng Li July 30, 2020, 12:56 a.m. UTC | #2
On Wed, 29 Jul 2020 at 20:21, Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
>
> Wanpeng Li <kernellwp@gmail.com> writes:
>
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > Commit 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM) drops
> > disable pause loop exit/pause filtering capability completely, I guess it
> > is a merge fault by Radim since disable vmexits capabilities and pause
> > loop exit for SVM patchsets are merged at the same time. This patch
> > reintroduces the disable pause loop exit/pause filtering capability
> > support.
> >
> > We can observe 2.9% hackbench improvement for a 92 vCPUs guest on AMD
> > Rome Server.
> >
> > Reported-by: Haiwei Li <lihaiwei@tencent.com>
> > Tested-by: Haiwei Li <lihaiwei@tencent.com>
> > Fixes: 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM)
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > ---
> >  arch/x86/kvm/svm/svm.c | 9 ++++++---
> >  1 file changed, 6 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index c0da4dd..c20f127 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
> >       svm->nested.vmcb = 0;
> >       svm->vcpu.arch.hflags = 0;
> >
> > -     if (pause_filter_count) {
> > +     if (pause_filter_count && !kvm_pause_in_guest(svm->vcpu.kvm)) {
> >               control->pause_filter_count = pause_filter_count;
> >               if (pause_filter_thresh)
> >                       control->pause_filter_thresh = pause_filter_thresh;
> > @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
> >       struct kvm_vcpu *vcpu = &svm->vcpu;
> >       bool in_kernel = (svm_get_cpl(vcpu) == 0);
> >
> > -     if (pause_filter_thresh)
> > +     if (!kvm_pause_in_guest(vcpu->kvm))
> >               grow_ple_window(vcpu);
> >
> >       kvm_vcpu_on_spin(vcpu, in_kernel);
> > @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
> >
> >  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
> >  {
> > -     if (pause_filter_thresh)
> > +     if (!kvm_pause_in_guest(vcpu->kvm))
> >               shrink_ple_window(vcpu);
> >  }
> >
> > @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
> >
> >  static int svm_vm_init(struct kvm *kvm)
> >  {
> > +     if (!pause_filter_thresh)
> > +             kvm->arch.pause_in_guest = true;
>
> Would it make sense to do
>
>         if (!pause_filter_count || !pause_filter_thresh)
>                 kvm->arch.pause_in_guest = true;
>
> here and simplify the condition in init_vmcb()?

kvm->arch.pause_in_guest can also be true when userspace sets the
KVM_CAP_X86_DISABLE_EXITS capability, so we can't simplify the
condition in init_vmcb().

    Wanpeng
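
For reference, kvm_pause_in_guest() is a one-line accessor (a sketch based
on arch/x86/kvm/x86.h in this tree): it reports the per-VM flag set either
by svm_vm_init() in this patch or by the KVM_CAP_X86_DISABLE_EXITS handler.

	static inline bool kvm_pause_in_guest(struct kvm *kvm)
	{
		return kvm->arch.pause_in_guest;
	}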
Vitaly Kuznetsov July 30, 2020, 11:16 a.m. UTC | #3
Wanpeng Li <kernellwp@gmail.com> writes:

> On Wed, 29 Jul 2020 at 20:21, Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
>>
>> Wanpeng Li <kernellwp@gmail.com> writes:
>>
>> > From: Wanpeng Li <wanpengli@tencent.com>
>> >
>> > Commit 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM) drops
>> > disable pause loop exit/pause filtering capability completely, I guess it
>> > is a merge fault by Radim since disable vmexits capabilities and pause
>> > loop exit for SVM patchsets are merged at the same time. This patch
>> > reintroduces the disable pause loop exit/pause filtering capability
>> > support.
>> >
>> > We can observe 2.9% hackbench improvement for a 92 vCPUs guest on AMD
>> > Rome Server.
>> >
>> > Reported-by: Haiwei Li <lihaiwei@tencent.com>
>> > Tested-by: Haiwei Li <lihaiwei@tencent.com>
>> > Fixes: 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM)
>> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
>> > ---
>> >  arch/x86/kvm/svm/svm.c | 9 ++++++---
>> >  1 file changed, 6 insertions(+), 3 deletions(-)
>> >
>> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> > index c0da4dd..c20f127 100644
>> > --- a/arch/x86/kvm/svm/svm.c
>> > +++ b/arch/x86/kvm/svm/svm.c
>> > @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
>> >       svm->nested.vmcb = 0;
>> >       svm->vcpu.arch.hflags = 0;
>> >
>> > -     if (pause_filter_count) {
>> > +     if (pause_filter_count && !kvm_pause_in_guest(svm->vcpu.kvm)) {
>> >               control->pause_filter_count = pause_filter_count;
>> >               if (pause_filter_thresh)
>> >                       control->pause_filter_thresh = pause_filter_thresh;
>> > @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
>> >       struct kvm_vcpu *vcpu = &svm->vcpu;
>> >       bool in_kernel = (svm_get_cpl(vcpu) == 0);
>> >
>> > -     if (pause_filter_thresh)
>> > +     if (!kvm_pause_in_guest(vcpu->kvm))
>> >               grow_ple_window(vcpu);
>> >
>> >       kvm_vcpu_on_spin(vcpu, in_kernel);
>> > @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
>> >
>> >  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
>> >  {
>> > -     if (pause_filter_thresh)
>> > +     if (!kvm_pause_in_guest(vcpu->kvm))
>> >               shrink_ple_window(vcpu);
>> >  }
>> >
>> > @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
>> >
>> >  static int svm_vm_init(struct kvm *kvm)
>> >  {
>> > +     if (!pause_filter_thresh)
>> > +             kvm->arch.pause_in_guest = true;
>>
>> Would it make sense to do
>>
>>         if (!pause_filter_count || !pause_filter_thresh)
>>                 kvm->arch.pause_in_guest = true;
>>
>> here and simplify the condition in init_vmcb()?
>
> kvm->arch.pause_in_guest can also be true when userspace sets the
> KVM_CAP_X86_DISABLE_EXITS capability, so we can't simplify the
> condition in init_vmcb().
>

I meant we could simplify it to

if (!kvm_pause_in_guest(svm->vcpu.kvm))

since "!pause_filter_count" is then covered.
Wanpeng Li July 31, 2020, 3:18 a.m. UTC | #4
On Thu, 30 Jul 2020 at 19:16, Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
>
> Wanpeng Li <kernellwp@gmail.com> writes:
>
> > On Wed, 29 Jul 2020 at 20:21, Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
> >>
> >> Wanpeng Li <kernellwp@gmail.com> writes:
> >>
> >> > From: Wanpeng Li <wanpengli@tencent.com>
> >> >
> >> > Commit 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM) drops
> >> > disable pause loop exit/pause filtering capability completely, I guess it
> >> > is a merge fault by Radim since disable vmexits capabilities and pause
> >> > loop exit for SVM patchsets are merged at the same time. This patch
> >> > reintroduces the disable pause loop exit/pause filtering capability
> >> > support.
> >> >
> >> > We can observe 2.9% hackbench improvement for a 92 vCPUs guest on AMD
> >> > Rome Server.
> >> >
> >> > Reported-by: Haiwei Li <lihaiwei@tencent.com>
> >> > Tested-by: Haiwei Li <lihaiwei@tencent.com>
> >> > Fixes: 8566ac8b (KVM: SVM: Implement pause loop exit logic in SVM)
> >> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> >> > ---
> >> >  arch/x86/kvm/svm/svm.c | 9 ++++++---
> >> >  1 file changed, 6 insertions(+), 3 deletions(-)
> >> >
> >> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> >> > index c0da4dd..c20f127 100644
> >> > --- a/arch/x86/kvm/svm/svm.c
> >> > +++ b/arch/x86/kvm/svm/svm.c
> >> > @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
> >> >       svm->nested.vmcb = 0;
> >> >       svm->vcpu.arch.hflags = 0;
> >> >
> >> > -     if (pause_filter_count) {
> >> > +     if (pause_filter_count && !kvm_pause_in_guest(svm->vcpu.kvm)) {
> >> >               control->pause_filter_count = pause_filter_count;
> >> >               if (pause_filter_thresh)
> >> >                       control->pause_filter_thresh = pause_filter_thresh;
> >> > @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
> >> >       struct kvm_vcpu *vcpu = &svm->vcpu;
> >> >       bool in_kernel = (svm_get_cpl(vcpu) == 0);
> >> >
> >> > -     if (pause_filter_thresh)
> >> > +     if (!kvm_pause_in_guest(vcpu->kvm))
> >> >               grow_ple_window(vcpu);
> >> >
> >> >       kvm_vcpu_on_spin(vcpu, in_kernel);
> >> > @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
> >> >
> >> >  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
> >> >  {
> >> > -     if (pause_filter_thresh)
> >> > +     if (!kvm_pause_in_guest(vcpu->kvm))
> >> >               shrink_ple_window(vcpu);
> >> >  }
> >> >
> >> > @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
> >> >
> >> >  static int svm_vm_init(struct kvm *kvm)
> >> >  {
> >> > +     if (!pause_filter_thresh)
> >> > +             kvm->arch.pause_in_guest = true;
> >>
> >> Would it make sense to do
> >>
> >>         if (!pause_filter_count || !pause_filter_thresh)
> >>                 kvm->arch.pause_in_guest = true;
> >>
> >> here and simplify the condition in init_vmcb()?
> >
> > kvm->arch.pause_in_guest can also be true when userspace sets the
> > KVM_CAP_X86_DISABLE_EXITS capability, so we can't simplify the
> > condition in init_vmcb().
> >
>
> I meant we simplify it to
>
> if (!kvm_pause_in_guest(svm->vcpu.kvm))
>
> as "!pause_filter_count" gets included.

Will do it in v3.

    Wanpeng

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd..c20f127 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1090,7 +1090,7 @@  static void init_vmcb(struct vcpu_svm *svm)
 	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
-	if (pause_filter_count) {
+	if (pause_filter_count && !kvm_pause_in_guest(svm->vcpu.kvm)) {
 		control->pause_filter_count = pause_filter_count;
 		if (pause_filter_thresh)
 			control->pause_filter_thresh = pause_filter_thresh;
@@ -2693,7 +2693,7 @@  static int pause_interception(struct vcpu_svm *svm)
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool in_kernel = (svm_get_cpl(vcpu) == 0);
 
-	if (pause_filter_thresh)
+	if (!kvm_pause_in_guest(vcpu->kvm))
 		grow_ple_window(vcpu);
 
 	kvm_vcpu_on_spin(vcpu, in_kernel);
@@ -3780,7 +3780,7 @@  static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
-	if (pause_filter_thresh)
+	if (!kvm_pause_in_guest(vcpu->kvm))
 		shrink_ple_window(vcpu);
 }
 
@@ -3958,6 +3958,9 @@  static void svm_vm_destroy(struct kvm *kvm)
 
 static int svm_vm_init(struct kvm *kvm)
 {
+	if (!pause_filter_thresh)
+		kvm->arch.pause_in_guest = true;
+
 	if (avic) {
 		int ret = avic_vm_init(kvm);
 		if (ret)