
[v5,8/8] KVM: MMU: zap pages in batch

Message ID 1368706673-8530-9-git-send-email-xiaoguangrong@linux.vnet.ibm.com
State New, archived

Commit Message

Xiao Guangrong May 16, 2013, 12:17 p.m. UTC
Zap at least 10 pages before releasing mmu-lock to reduce the overhead
caused by repeatedly acquiring the lock.

[ It improves kernel build performance by 0.6% ~ 1%. ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   11 ++++++++---
 1 files changed, 8 insertions(+), 3 deletions(-)
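
The pattern the patch applies is general: instead of offering to drop a
contended lock after every single item, hold it for a minimum batch first,
so the unlock/relock (and reschedule) cost is amortized. A minimal
user-space sketch of the idea, assuming a pthread mutex stands in for
kvm->mmu_lock and clearing array slots stands in for zapping pages (names
are illustrative, not the KVM code):

/*
 * Minimal sketch of the batched lock-release pattern; illustrative
 * only, not the kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES	10
#define NR_ITEMS	1000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NR_ITEMS];

static void zap_all(void)
{
	int batch = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < NR_ITEMS; i++) {
		items[i] = 0;		/* "zap" one item under the lock */

		/*
		 * Only pay the unlock/relock round trip once at least
		 * BATCH_ZAP_PAGES items have been processed; yielding
		 * after every single item is the overhead the patch
		 * removes.
		 */
		if (++batch >= BATCH_ZAP_PAGES) {
			pthread_mutex_unlock(&lock);	/* let waiters in */
			pthread_mutex_lock(&lock);
			batch = 0;
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < NR_ITEMS; i++)
		items[i] = 1;
	zap_all();
	printf("zapped %d items\n", NR_ITEMS);
	return 0;
}

In the kernel, cond_resched_lock() combines the unlock/relock with a
voluntary reschedule and only does so when the lock is contended or a
reschedule is due; the batch threshold bounds how often that yield can
happen at all.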

Comments

Paolo Bonzini May 16, 2013, 12:45 p.m. UTC | #1
On 16/05/2013 14:17, Xiao Guangrong wrote:
> Zap at least 10 pages before releasing mmu-lock to reduce the overhead
> caused by repeatedly acquiring the lock.
> 
> [ It improves kernel build performance by 0.6% ~ 1%. ]
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/mmu.c |   11 ++++++++---
>  1 files changed, 8 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index e12f431..9c27fda 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -4216,10 +4216,12 @@ restart:
>  	spin_unlock(&kvm->mmu_lock);
>  }
>  
> +#define BATCH_ZAP_PAGES	10
>  static void zap_invalid_pages(struct kvm *kvm)
>  {
>  	struct kvm_mmu_page *sp, *node;
>  	LIST_HEAD(invalid_list);
> +	int batch = 0;
>  
>  restart:
>  	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
> @@ -4256,11 +4258,14 @@ restart:
>  		 * Need not flush tlb since we only zap the sp with invalid
>  		 * generation number.
>  		 */
> -		if (cond_resched_lock(&kvm->mmu_lock))
> +		if ((batch >= BATCH_ZAP_PAGES) &&
> +		      cond_resched_lock(&kvm->mmu_lock)) {
> +			batch = 0;
>  			goto restart;
> +		}
>  
> -		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
> -			goto restart;
> +		batch += kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
> +		goto restart;

Would this look again and again at the same page if
kvm_mmu_prepare_zap_page returns 0?

Paolo

>  	}
>  
>  	/*
> 

Xiao Guangrong May 16, 2013, 1:31 p.m. UTC | #2
On 05/16/2013 08:45 PM, Paolo Bonzini wrote:
> On 16/05/2013 14:17, Xiao Guangrong wrote:
>> Zap at least 10 pages before releasing mmu-lock to reduce the overhead
>> caused by repeatedly acquiring the lock.
>>
>> [ It improves kernel build performance by 0.6% ~ 1%. ]
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
>> ---
>>  arch/x86/kvm/mmu.c |   11 ++++++++---
>>  1 files changed, 8 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index e12f431..9c27fda 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -4216,10 +4216,12 @@ restart:
>>  	spin_unlock(&kvm->mmu_lock);
>>  }
>>  
>> +#define BATCH_ZAP_PAGES	10
>>  static void zap_invalid_pages(struct kvm *kvm)
>>  {
>>  	struct kvm_mmu_page *sp, *node;
>>  	LIST_HEAD(invalid_list);
>> +	int batch = 0;
>>  
>>  restart:
>>  	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
>> @@ -4256,11 +4258,14 @@ restart:
>>  		 * Need not flush tlb since we only zap the sp with invalid
>>  		 * generation number.
>>  		 */
>> -		if (cond_resched_lock(&kvm->mmu_lock))
>> +		if ((batch >= BATCH_ZAP_PAGES) &&
>> +		      cond_resched_lock(&kvm->mmu_lock)) {
>> +			batch = 0;
>>  			goto restart;
>> +		}
>>  
>> -		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
>> -			goto restart;
>> +		batch += kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
>> +		goto restart;
> 
> Would this look again and again at the same page if
> kvm_mmu_prepare_zap_page returns 0?

We skip invalid pages (sp->role.invalid) before calling
kvm_mmu_prepare_zap_page, so kvm_mmu_prepare_zap_page cannot
encounter the same page again. ;)
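
A minimal user-space model of why the unconditional restart always makes
progress (the names page, is_root and prepare_zap are made up for
illustration; in the kernel, the 0-return case is a root page that gets
marked sp->role.invalid instead of being freed immediately):

/*
 * Model of the restart loop: prepare_zap() either unlinks the page
 * (returns 1) or marks it invalid (returns 0), so every pass removes
 * or invalidates one page and the walk terminates.  Illustrative
 * only, not the KVM code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	struct page *next;
	bool invalid;	/* stands in for sp->role.invalid */
	bool is_root;	/* a root page cannot be freed right away */
};

static int prepare_zap(struct page **head, struct page *sp)
{
	struct page **pp;

	if (sp->is_root) {
		sp->invalid = true;	/* mark it; a later pass skips it */
		return 0;
	}

	/* unlink and free the page */
	for (pp = head; *pp != sp; pp = &(*pp)->next)
		;
	*pp = sp->next;
	free(sp);
	return 1;
}

static void zap_all(struct page **head)
{
	struct page *sp;

restart:
	for (sp = *head; sp; sp = sp->next) {
		if (sp->invalid)	/* the skip Xiao refers to */
			continue;
		prepare_zap(head, sp);
		/*
		 * Unconditional restart, as in the patch: safe because
		 * sp was either unlinked or marked invalid above, so it
		 * cannot be visited a second time.
		 */
		goto restart;
	}
}

int main(void)
{
	struct page *head = NULL;

	for (int i = 0; i < 8; i++) {
		struct page *sp = calloc(1, sizeof(*sp));
		sp->is_root = (i % 3 == 0);
		sp->next = head;
		head = sp;
	}

	zap_all(&head);

	int remaining = 0;
	for (struct page *sp = head; sp; sp = sp->next)
		remaining++;
	printf("%d invalid root pages remain on the list\n", remaining);
	return 0;
}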




Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e12f431..9c27fda 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4216,10 +4216,12 @@  restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
+#define BATCH_ZAP_PAGES	10
 static void zap_invalid_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
+	int batch = 0;
 
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
@@ -4256,11 +4258,14 @@  restart:
 		 * Need not flush tlb since we only zap the sp with invalid
 		 * generation number.
 		 */
-		if (cond_resched_lock(&kvm->mmu_lock))
+		if ((batch >= BATCH_ZAP_PAGES) &&
+		      cond_resched_lock(&kvm->mmu_lock)) {
+			batch = 0;
 			goto restart;
+		}
 
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
+		batch += kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		goto restart;
 	}
 
 	/*