
[v3,2/2] KVM: x86: directly use kvm_make_request again

Message ID 1411058317-23646-3-git-send-email-liangchen.linux@gmail.com (mailing list archive)
State New, archived

Commit Message

Liang Chen Sept. 18, 2014, 4:38 p.m. UTC
A one-line wrapper around kvm_make_request does not seem
particularly useful. Replace kvm_mmu_flush_tlb() with
kvm_make_request() again to free the namespace a bit.

Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/mmu.c              | 16 +++++-----------
 arch/x86/kvm/vmx.c              |  2 +-
 arch/x86/kvm/x86.c              | 11 ++++++++---
 4 files changed, 14 insertions(+), 16 deletions(-)

Comments

Radim Krčmář Sept. 18, 2014, 6:12 p.m. UTC | #1
2014-09-18 12:38-0400, Liang Chen:
> A one-line wrapper around kvm_make_request does not seem
> particularly useful. Replace kvm_mmu_flush_tlb() with
> kvm_make_request() again to free the namespace a bit.
> 
> Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

>  arch/x86/include/asm/kvm_host.h |  1 -
>  arch/x86/kvm/mmu.c              | 16 +++++-----------
>  arch/x86/kvm/vmx.c              |  2 +-
>  arch/x86/kvm/x86.c              | 11 ++++++++---
>  4 files changed, 14 insertions(+), 16 deletions(-)
> [...]
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9eb5458..fc3df50 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> [...]
> @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
>  			kvm_mmu_sync_roots(vcpu);
>  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
> -			++vcpu->stat.tlb_flush;
> -			kvm_x86_ops->tlb_flush(vcpu);
> +			kvm_vcpu_flush_tlb(vcpu);
>  		}

(Braces are not necessary.)
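
For reference, the hunk without the braces would read like this (just an
illustrative sketch of the style nit, not part of the posted patch):

		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_vcpu_flush_tlb(vcpu);
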
Xiao Guangrong Sept. 19, 2014, 6:12 a.m. UTC | #2
On 09/19/2014 12:38 AM, Liang Chen wrote:
> A one-line wrapper around kvm_make_request does not seem
> particularly useful. Replace kvm_mmu_flush_tlb() with
> kvm_make_request() again to free the namespace a bit.
> 
> Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  1 -
>  arch/x86/kvm/mmu.c              | 16 +++++-----------
>  arch/x86/kvm/vmx.c              |  2 +-
>  arch/x86/kvm/x86.c              | 11 ++++++++---
>  4 files changed, 14 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 7c492ed..77ade89 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -917,7 +917,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
> 
>  int fx_init(struct kvm_vcpu *vcpu);
> 
> -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  		       const u8 *new, int bytes);
>  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index b41fd97..acc2d0c5 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1749,7 +1749,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  		return 1;
>  	}
> 
> -	kvm_mmu_flush_tlb(vcpu);
> +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  	return 0;
>  }
> 
> @@ -1802,7 +1802,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
> 
>  	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
>  	if (flush)
> -		kvm_mmu_flush_tlb(vcpu);
> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  }
> 
>  struct mmu_page_path {
> @@ -2536,7 +2536,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>  	      true, host_writable)) {
>  		if (write_fault)
>  			*emulate = 1;
> -		kvm_mmu_flush_tlb(vcpu);
> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  	}
> 
>  	if (unlikely(is_mmio_spte(*sptep) && emulate))
> @@ -3450,12 +3450,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
>  	context->nx = false;
>  }
> 
> -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
> -{
> -	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> -}
> -EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
> -
>  void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
>  {
>  	mmu_free_roots(vcpu);
> @@ -3961,7 +3955,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
>  	if (remote_flush)
>  		kvm_flush_remote_tlbs(vcpu->kvm);
>  	else if (local_flush)
> -		kvm_mmu_flush_tlb(vcpu);
> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  }
> 
>  static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
> @@ -4222,7 +4216,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
>  {
>  	vcpu->arch.mmu.invlpg(vcpu, gva);
> -	kvm_mmu_flush_tlb(vcpu);
> +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  	++vcpu->stat.invlpg;
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index bfe11cf..bb0a7ab 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -6617,7 +6617,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
>  	switch (type) {
>  	case VMX_EPT_EXTENT_GLOBAL:
>  		kvm_mmu_sync_roots(vcpu);
> -		kvm_mmu_flush_tlb(vcpu);
> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  		nested_vmx_succeed(vcpu);
>  		break;
>  	default:
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9eb5458..fc3df50 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -726,7 +726,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>  {
>  	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
>  		kvm_mmu_sync_roots(vcpu);
> -		kvm_mmu_flush_tlb(vcpu);
> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  		return 0;
>  	}
> 
> @@ -5989,6 +5989,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
>  	kvm_apic_update_tmr(vcpu, tmr);
>  }
> 
> +static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
> +{
> +	++vcpu->stat.tlb_flush;
> +	kvm_x86_ops->tlb_flush(vcpu);
> +}
> +
>  /*
>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
>   * exiting to the userspace.  Otherwise, the value will be returned to the
> @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
>  			kvm_mmu_sync_roots(vcpu);
>  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
> -			++vcpu->stat.tlb_flush;
> -			kvm_x86_ops->tlb_flush(vcpu);
> +			kvm_vcpu_flush_tlb(vcpu);

NACK!

I do not understand why you have to introduce a meaningful name
here - it is used only inside a single function, so it cannot help to
improve the readability of the code at all.

What I suggested is renaming kvm_mmu_flush_tlb(), since it is an
API used in multiple files - a good name helps developers know
what it is doing, and it is definitely easier to type.
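
For illustration, the suggested rename would keep an exported per-vcpu
wrapper under a clearer name - roughly the sketch below (hypothetical,
not code from this series):

/* hypothetical: keep the exported wrapper, just under a per-vcpu name */
void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_flush_tlb);

with the declaration in kvm_host.h updated to match:

void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu);

Whether the ++vcpu->stat.tlb_flush bump would also move into that wrapper
is a separate choice.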



Radim Krčmář Sept. 19, 2014, 12:25 p.m. UTC | #3
2014-09-19 14:12+0800, Xiao Guangrong:
> On 09/19/2014 12:38 AM, Liang Chen wrote:
> > A one-line wrapper around kvm_make_request does not seem
> > particularly useful. Replace kvm_mmu_flush_tlb() with
> > kvm_make_request() again to free the namespace a bit.
> > 
> > Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |  1 -
> >  arch/x86/kvm/mmu.c              | 16 +++++-----------
> >  arch/x86/kvm/vmx.c              |  2 +-
> >  arch/x86/kvm/x86.c              | 11 ++++++++---
> >  4 files changed, 14 insertions(+), 16 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index 7c492ed..77ade89 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -917,7 +917,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
> > 
> >  int fx_init(struct kvm_vcpu *vcpu);
> > 
> > -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
> >  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> >  		       const u8 *new, int bytes);
> >  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index b41fd97..acc2d0c5 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -1749,7 +1749,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> >  		return 1;
> >  	}
> > 
> > -	kvm_mmu_flush_tlb(vcpu);
> > +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  	return 0;
> >  }
> > 
> > @@ -1802,7 +1802,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
> > 
> >  	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
> >  	if (flush)
> > -		kvm_mmu_flush_tlb(vcpu);
> > +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  }
> > 
> >  struct mmu_page_path {
> > @@ -2536,7 +2536,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> >  	      true, host_writable)) {
> >  		if (write_fault)
> >  			*emulate = 1;
> > -		kvm_mmu_flush_tlb(vcpu);
> > +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  	}
> > 
> >  	if (unlikely(is_mmio_spte(*sptep) && emulate))
> > @@ -3450,12 +3450,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
> >  	context->nx = false;
> >  }
> > 
> > -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
> > -{
> > -	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> > -}
> > -EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
> > -
> >  void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
> >  {
> >  	mmu_free_roots(vcpu);
> > @@ -3961,7 +3955,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
> >  	if (remote_flush)
> >  		kvm_flush_remote_tlbs(vcpu->kvm);
> >  	else if (local_flush)
> > -		kvm_mmu_flush_tlb(vcpu);
> > +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  }
> > 
> >  static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
> > @@ -4222,7 +4216,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
> >  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
> >  {
> >  	vcpu->arch.mmu.invlpg(vcpu, gva);
> > -	kvm_mmu_flush_tlb(vcpu);
> > +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  	++vcpu->stat.invlpg;
> >  }
> >  EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > index bfe11cf..bb0a7ab 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -6617,7 +6617,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
> >  	switch (type) {
> >  	case VMX_EPT_EXTENT_GLOBAL:
> >  		kvm_mmu_sync_roots(vcpu);
> > -		kvm_mmu_flush_tlb(vcpu);
> > +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  		nested_vmx_succeed(vcpu);
> >  		break;
> >  	default:
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 9eb5458..fc3df50 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -726,7 +726,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> >  {
> >  	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
> >  		kvm_mmu_sync_roots(vcpu);
> > -		kvm_mmu_flush_tlb(vcpu);
> > +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> >  		return 0;
> >  	}
> > 
> > @@ -5989,6 +5989,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
> >  	kvm_apic_update_tmr(vcpu, tmr);
> >  }
> > 
> > +static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
> > +{
> > +	++vcpu->stat.tlb_flush;
> > +	kvm_x86_ops->tlb_flush(vcpu);
> > +}
> > +
> >  /*
> >   * Returns 1 to let __vcpu_run() continue the guest execution loop without
> >   * exiting to the userspace.  Otherwise, the value will be returned to the
> > @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
> >  			kvm_mmu_sync_roots(vcpu);
> >  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
> > -			++vcpu->stat.tlb_flush;
> > -			kvm_x86_ops->tlb_flush(vcpu);
> > +			kvm_vcpu_flush_tlb(vcpu);
> 
> NACK!
> 
> I do not understand why you have to introduce a meaningful name
> here - it is used only inside a single function, so it cannot help to
> improve the readability of the code at all.

I prefer the new hunk
 - it makes the parent function simpler (not everyone wants to read how
   we do tlb flushes when looking at vcpu_enter_guest)
 - the function is properly named
 - we do a similar thing with kvm_gen_kvmclock_update (sketched below)
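
(For context, kvm_gen_kvmclock_update() in x86.c has the same shape; the
sketch below is reconstructed from memory, so details may differ from the
tree at the time.)

static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	struct kvm *kvm = v->kvm;

	/* ask this vcpu to refresh its kvmclock ... */
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
	/* ... and schedule a delayed, rate-limited global clock update */
	schedule_delayed_work(&kvm->kvmclock_update_work,
			      KVMCLOCK_UPDATE_DELAY);
}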

> What I suggested is renaming kvm_mmu_flush_tlb(), since it is an
> API used in multiple files - a good name helps developers know
> what it is doing, and it is definitely easier to type.

I think it is a good idea.
The proposed name is definitely better than what we have now.

You can see reasons that led me to prefer raw request below.
(Preferring something else in no way means that I'm against your idea.)

---
I'm always trying to reach some ideal code in my mind, which makes me
seemingly oppose good proposals because I see how it could be even
better ...  and I opt not to do them.
(Pushing minor refactoring patches upstream is hard!)

My issues with kvm_mmu_flush_tlb:

 - 'kvm_flush_remote_tlbs()' calls tlb request directly;
    our wrapper thus cannot be extended with features, which makes it a
    poor abstraction
 - we don't do this for other requests
 - direct request isn't absolutely horrible to read and write
   (I totally agree that it is bad.)
 - we call one function 'kvm_mmu_flush_tlb()' and the second one
   'kvm_flush_remote_tlbs()' and I'd need to look why

Which is why just removing it solves more problems for me :)
Xiao Guangrong Sept. 19, 2014, 1:35 p.m. UTC | #4
On 09/19/2014 08:25 PM, Radim Krčmář wrote:

>>>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
>>>   * exiting to the userspace.  Otherwise, the value will be returned to the
>>> @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>>>  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
>>>  			kvm_mmu_sync_roots(vcpu);
>>>  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
>>> -			++vcpu->stat.tlb_flush;
>>> -			kvm_x86_ops->tlb_flush(vcpu);
>>> +			kvm_vcpu_flush_tlb(vcpu);
>>
>> NACK!
>>
> >> I do not understand why you have to introduce a meaningful name
> >> here - it is used only inside a single function, so it cannot help to
> >> improve the readability of the code at all.
> 
> I prefer the new hunk
>  - it makes the parent function simpler (not everyone wants to read how
>    we do tlb flushes when looking at vcpu_enter_guest)

Using one line instead of two does not simplify the parent function much.

>  - the function is properly named

kvm_x86_ops->tlb_flush(vcpu) is also a good hint to tell the reader it is
doing a TLB flush. :)

>  - we do a similar thing with kvm_gen_kvmclock_update

I understand this raw-bit-set style is widely used in the current KVM code;
however, that does not mean it is the best way to do it. It may be phased out
someday as it gets used in more and more places.

Anyway, a meaningful name wrapping the raw bit-set is the right direction,
and let's keep going in that direction.

> 
> >> What I suggested is renaming kvm_mmu_flush_tlb(), since it is an
> >> API used in multiple files - a good name helps developers know
> >> what it is doing, and it is definitely easier to type.
> 
> I think it is a good idea.
> The proposed name is definitely better than what we have now.
> 
> You can see reasons that led me to prefer raw request below.
> (Preferring something else in no way means that I'm against your idea.)

I understand that, Radim! :)

> 
> ---
> I'm always trying to reach some ideal code in my mind, which makes me
> seemingly oppose good proposals because I see how it could be even
> better ...  and I opt not to do them.
> (Pushing minor refactoring patches upstream is hard!)
> 
> My issues with kvm_mmu_flush_tlb:
> 
>  - 'kvm_flush_remote_tlbs()' calls tlb request directly;
>     our wrapper thus cannot be extended with features, which makes it a
>     poor abstraction

kvm_flush_remote_tlbs() does not only set the TLB request, it also handles
memory ordering and syncs the TLB state.
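
A sketch of it, reconstructed from memory (details may differ from the tree
at the time), which shows why it is more than a one-line request:

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	/* order the tlbs_dirty read against raising the requests */
	smp_mb();
	/* raise KVM_REQ_TLB_FLUSH on every vcpu and kick them out of the guest */
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	/* clear tlbs_dirty only if nobody dirtied more sptes meanwhile */
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}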

I guess you meant kvm_mmu_flush_tlb() here; it is an API name, which lets it
be easily used in other files. It is not worth committing a patch that does
nothing except revert the meaningful name.

>  - we don't do this for other requests

See above.

>  - direct request isn't absolutely horrible to read and write
>    (I totally agree that it is bad.)
>  - we call one function 'kvm_mmu_flush_tlb()' and the second one
>    'kvm_flush_remote_tlbs()' and I'd need to look why

Yeah, this is why I suggested renaming kvm_mmu_flush_tlb(), which clarifies
things better:
- kvm_flush_remote_tlbs: flush tlb in all vcpus
- kvm_vcpu_flush_tlb: only flush tlb on the vcpu specified by @vcpu.

> 
> Which is why just removing it solves more problems for me :)

Thank you for raising this question and letting me know the patch's history. :)



Paolo Bonzini Sept. 19, 2014, 2 p.m. UTC | #5
On 19/09/2014 15:35, Xiao Guangrong wrote:
>> > Which is why just removing it solves more problems for me :)
> Thank you for raising this question and letting me know the patch's history. :)

I agree with Radim, so I'll apply the patch.

Paolo
Liang Chen Sept. 19, 2014, 2:08 p.m. UTC | #6
On 09/19/2014 02:12 AM, Xiao Guangrong wrote:
> On 09/19/2014 12:38 AM, Liang Chen wrote:
>> A one-line wrapper around kvm_make_request does not seem
>> particularly useful. Replace kvm_mmu_flush_tlb() with
>> kvm_make_request() again to free the namespace a bit.
>>
>> Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
>> ---
>>  arch/x86/include/asm/kvm_host.h |  1 -
>>  arch/x86/kvm/mmu.c              | 16 +++++-----------
>>  arch/x86/kvm/vmx.c              |  2 +-
>>  arch/x86/kvm/x86.c              | 11 ++++++++---
>>  4 files changed, 14 insertions(+), 16 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 7c492ed..77ade89 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -917,7 +917,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
>>
>>  int fx_init(struct kvm_vcpu *vcpu);
>>
>> -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
>>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>>  		       const u8 *new, int bytes);
>>  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index b41fd97..acc2d0c5 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -1749,7 +1749,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>>  		return 1;
>>  	}
>>
>> -	kvm_mmu_flush_tlb(vcpu);
>> +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  	return 0;
>>  }
>>
>> @@ -1802,7 +1802,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
>>
>>  	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
>>  	if (flush)
>> -		kvm_mmu_flush_tlb(vcpu);
>> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  }
>>
>>  struct mmu_page_path {
>> @@ -2536,7 +2536,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>>  	      true, host_writable)) {
>>  		if (write_fault)
>>  			*emulate = 1;
>> -		kvm_mmu_flush_tlb(vcpu);
>> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  	}
>>
>>  	if (unlikely(is_mmio_spte(*sptep) && emulate))
>> @@ -3450,12 +3450,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
>>  	context->nx = false;
>>  }
>>
>> -void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
>> -{
>> -	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>> -}
>> -EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
>> -
>>  void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
>>  {
>>  	mmu_free_roots(vcpu);
>> @@ -3961,7 +3955,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
>>  	if (remote_flush)
>>  		kvm_flush_remote_tlbs(vcpu->kvm);
>>  	else if (local_flush)
>> -		kvm_mmu_flush_tlb(vcpu);
>> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  }
>>
>>  static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
>> @@ -4222,7 +4216,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>>  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
>>  {
>>  	vcpu->arch.mmu.invlpg(vcpu, gva);
>> -	kvm_mmu_flush_tlb(vcpu);
>> +	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  	++vcpu->stat.invlpg;
>>  }
>>  EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index bfe11cf..bb0a7ab 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -6617,7 +6617,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
>>  	switch (type) {
>>  	case VMX_EPT_EXTENT_GLOBAL:
>>  		kvm_mmu_sync_roots(vcpu);
>> -		kvm_mmu_flush_tlb(vcpu);
>> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  		nested_vmx_succeed(vcpu);
>>  		break;
>>  	default:
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 9eb5458..fc3df50 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -726,7 +726,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>>  {
>>  	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
>>  		kvm_mmu_sync_roots(vcpu);
>> -		kvm_mmu_flush_tlb(vcpu);
>> +		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>>  		return 0;
>>  	}
>>
>> @@ -5989,6 +5989,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
>>  	kvm_apic_update_tmr(vcpu, tmr);
>>  }
>>
>> +static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
>> +{
>> +	++vcpu->stat.tlb_flush;
>> +	kvm_x86_ops->tlb_flush(vcpu);
>> +}
>> +
>>  /*
>>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
>>   * exiting to the userspace.  Otherwise, the value will be returned to the
>> @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>>  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
>>  			kvm_mmu_sync_roots(vcpu);
>>  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
>> -			++vcpu->stat.tlb_flush;
>> -			kvm_x86_ops->tlb_flush(vcpu);
>> +			kvm_vcpu_flush_tlb(vcpu);
> NACK!
>
> I do not understand why you have to introduce a meaningful name
> here - it is used only inside a single function, so it cannot help to
> improve the readability of the code at all.
>
> What I suggested is renaming kvm_mmu_flush_tlb(), since it is an
> API used in multiple files - a good name helps developers know
> what it is doing, and it is definitely easier to type.
>
>
>
Thanks for the comments. However ...

Leaving kvm_mmu_flush_tlb() around might be error prone. People might be
tempted to make a change similar to the ++vcpu->stat.tlb_flush one inside
that function in the future. I know we have gatekeepers who can spot
that kind of mistake, but why bother with that...

I agree the raw request doesn't look good. But
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu) is completely
understandable even to a beginner, and future-proofing is just
as important.


Thanks,
Liang

Liang Chen Sept. 19, 2014, 2:26 p.m. UTC | #7
oops, didn't see this ;)   Thank you!

Thanks,
Liang

On 09/19/2014 10:00 AM, Paolo Bonzini wrote:
> On 19/09/2014 15:35, Xiao Guangrong wrote:
>>>> Which is why just removing it solves more problems for me :)
>> Thank you for raising this question and letting me know the patch's history. :)
> I agree with Radim, so I'll apply the patch.
>
> Paolo

Radim Krčmář Sept. 19, 2014, 9:10 p.m. UTC | #8
2014-09-19 21:35+0800, Xiao Guangrong:
> On 09/19/2014 08:25 PM, Radim Krčmář wrote:
> >>>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
> >>>   * exiting to the userspace.  Otherwise, the value will be returned to the
> >>> @@ -6018,8 +6024,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> >>>  		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
> >>>  			kvm_mmu_sync_roots(vcpu);
> >>>  		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
> >>> -			++vcpu->stat.tlb_flush;
> >>> -			kvm_x86_ops->tlb_flush(vcpu);
> >>> +			kvm_vcpu_flush_tlb(vcpu);
> >>
> >> NACK!
> >>
> >> I do not understand why you have to introduce a meaningful name
> >> here - it is used only inside a single function, so it cannot help to
> >> improve the readability of the code at all.
> > 
> > I prefer the new hunk
> >  - it makes the parent function simpler (not everyone wants to read how
> >    we do tlb flushes when looking at vcpu_enter_guest)
> 
> Using one line instead of two does not simplify the parent function much.

(Don't forget braces!)

There might come a patch that pushes the length above a readability
threshold.  With our development process, I think it is quite likely
that a new function won't get created then;
and preventing this situation makes the function nicer now as well.

(Most of my thinking that is about cases that will never happen.)

> >  - the function is properly named
> 
> kvm_x86_ops->tlb_flush(vcpu) is also a good hint to tell the reader it is
> doing a TLB flush. :)

Yep.  (The surprise was leaked by KVM_REQ_TLB_FLUSH.)

It was more like a safety check -- if we wanted a new function, it should
be called like that.

> >  - we do a similar thing with kvm_gen_kvmclock_update
> 
> I understand this raw-bit-set style is widely used in the current KVM code;
> however, that does not mean it is the best way to do it. It may be phased out
> someday as it gets used in more and more places.
> 
> Anyway, a meaningful name wrapping the raw bit-set is the right direction,
> and let's keep going in that direction.

Agreed, it would be nice to have an indirection that hides the
underlying request mechanics from higher-level code.

(More below.)

> > My issues with kvm_mmu_flush_tlb:
> > 
> >  - 'kvm_flush_remote_tlbs()' calls tlb request directly;
> >     our wrapper thus cannot be extended with features, which makes it a
> >     poor abstraction
> 
> kvm_flush_remote_tlbs() does not only set the TLB request, it also handles
> memory ordering and syncs the TLB state.
> 
> I guess you meant kvm_mmu_flush_tlb() here; it is an API name, which lets it
> be easily used in other files. It is not worth committing a patch that does
> nothing except revert the meaningful name.

(I really meant kvm_flush_remote_tlbs().)

When we change kvm_mmu_flush_tlb(), it doesn't get propagated to
"remote" TLB flushes => we might have a false sense of API and
the code is harder to work with because of that.

(I don't consider kvm_mmu_flush_tlb() a step in the right direction ...
 close, like all bugs.)

> >  - we don't do this for other requests
> 
> See above.

(Below is here.)

Between half-new half-old and unmixed API, I'm leaning towards the
latter option ...
(My arguments for this are weak though; not enough experience.)

> >  - direct request isn't absolutely horrible to read and write
> >    (I totally agree that it is bad.)
> >  - we call one function 'kvm_mmu_flush_tlb()' and the second one
> >    'kvm_flush_remote_tlbs()' and I'd need to look why
> 
> Yeah, this is why I suggested renaming kvm_mmu_flush_tlb(), which clarifies
> things better:
> - kvm_flush_remote_tlbs: flush tlb in all vcpus
> - kvm_vcpu_flush_tlb: only flush tlb on the vcpu specified by @vcpu.

(I am confused about "mmu" in names -- kvm_flush_remote_tlbs is shared
 through host.h, which is probably why it didn't get "mmu".)

> > Which is why just removing it solves more problems for me :)
> 
> Thank you for raising this question and letting me know the patch's history. :)

Thanks for the reply, I hope I have understood you correctly,
now just to find a person to write all the good code :)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7c492ed..77ade89 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -917,7 +917,6 @@  void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
 int fx_init(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b41fd97..acc2d0c5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1749,7 +1749,7 @@  static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return 1;
 	}
 
-	kvm_mmu_flush_tlb(vcpu);
+	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	return 0;
 }
 
@@ -1802,7 +1802,7 @@  static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	if (flush)
-		kvm_mmu_flush_tlb(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 struct mmu_page_path {
@@ -2536,7 +2536,7 @@  static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	      true, host_writable)) {
 		if (write_fault)
 			*emulate = 1;
-		kvm_mmu_flush_tlb(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}
 
 	if (unlikely(is_mmio_spte(*sptep) && emulate))
@@ -3450,12 +3450,6 @@  static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->nx = false;
 }
 
-void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
-
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
 	mmu_free_roots(vcpu);
@@ -3961,7 +3955,7 @@  static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
 	if (remote_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	else if (local_flush)
-		kvm_mmu_flush_tlb(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
@@ -4222,7 +4216,7 @@  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	vcpu->arch.mmu.invlpg(vcpu, gva);
-	kvm_mmu_flush_tlb(vcpu);
+	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe11cf..bb0a7ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6617,7 +6617,7 @@  static int handle_invept(struct kvm_vcpu *vcpu)
 	switch (type) {
 	case VMX_EPT_EXTENT_GLOBAL:
 		kvm_mmu_sync_roots(vcpu);
-		kvm_mmu_flush_tlb(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 		nested_vmx_succeed(vcpu);
 		break;
 	default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9eb5458..fc3df50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -726,7 +726,7 @@  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
-		kvm_mmu_flush_tlb(vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 		return 0;
 	}
 
@@ -5989,6 +5989,12 @@  static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	kvm_apic_update_tmr(vcpu, tmr);
 }
 
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.tlb_flush;
+	kvm_x86_ops->tlb_flush(vcpu);
+}
+
 /*
  * Returns 1 to let __vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
@@ -6018,8 +6024,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
 			kvm_mmu_sync_roots(vcpu);
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
-			++vcpu->stat.tlb_flush;
-			kvm_x86_ops->tlb_flush(vcpu);
+			kvm_vcpu_flush_tlb(vcpu);
 		}
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;