
[v2] KVM: MMU: optimize pte write path if don't have protected sp

Message ID 4DC9F803.3050602@cn.fujitsu.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong May 11, 2011, 2:44 a.m. UTC
Simply return from the kvm_mmu_pte_write path if no shadow page is
write-protected, so that we avoid walking all shadow pages and taking
mmu-lock.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/mmu.c              |   12 +++++++++++-
 2 files changed, 12 insertions(+), 1 deletions(-)
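
As a rough illustration of the idea, here is a standalone userspace sketch
(the helper names are made up, and C11 atomics plus a pthread mutex stand in
for the kernel's atomic_t and mmu_lock; the real code is in the patch below):
keep a count of indirect shadow pages and let the pte-write path return
before taking the lock whenever that count is zero.

	/* Illustrative sketch only -- not the kernel code in the patch. */
	#include <stdatomic.h>
	#include <pthread.h>

	static atomic_int indirect_shadow_pages;	/* # of !role.direct shadow pages */
	static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Emulated guest pte write path (heavily simplified). */
	static void pte_write(void)
	{
		/*
		 * No indirect shadow pages means no guest page-table gfn is
		 * write-protected, so there is nothing to zap or update:
		 * skip the locked walk entirely.
		 */
		if (atomic_load(&indirect_shadow_pages) == 0)
			return;

		pthread_mutex_lock(&mmu_lock);
		/* ... walk shadow pages, update/zap sptes ... */
		pthread_mutex_unlock(&mmu_lock);
	}

	/* Shadow-page creation/teardown keep the counter in sync. */
	static void shadow_page_created(int direct)
	{
		if (!direct)
			atomic_fetch_add(&indirect_shadow_pages, 1);
	}

	static void shadow_page_freed(int direct)
	{
		if (!direct)
			atomic_fetch_sub(&indirect_shadow_pages, 1);
	}

	int main(void)
	{
		shadow_page_created(1);	/* direct page: counter stays 0 */
		pte_write();		/* fast path: returns before locking */
		return 0;
	}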

Comments

Avi Kivity May 11, 2011, 11:28 a.m. UTC | #1
On 05/11/2011 05:44 AM, Xiao Guangrong wrote:
> Simply return from the kvm_mmu_pte_write path if no shadow page is
> write-protected, so that we avoid walking all shadow pages and taking
> mmu-lock.
>
> @@ -1038,8 +1038,10 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
>   	hlist_del(&sp->hash_link);
>   	list_del(&sp->link);
>   	free_page((unsigned long)sp->spt);
> -	if (!sp->role.direct)
> +	if (!sp->role.direct) {
>   		free_page((unsigned long)sp->gfns);
> +		atomic_dec(&kvm->arch.indirect_shadow_pages);
> +	}
>   	kmem_cache_free(mmu_page_header_cache, sp);
>   	kvm_mod_used_mmu_pages(kvm, -1);
>   }
> @@ -1536,6 +1538,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>   			kvm_sync_pages(vcpu, gfn);
>
>   		account_shadowed(vcpu->kvm, gfn);
> +		atomic_inc(&vcpu->kvm->arch.indirect_shadow_pages);
>   	}

Better in account_shadowed()/unaccount_shadowed(), no?
Xiao Guangrong May 11, 2011, 12:01 p.m. UTC | #2
On 05/11/2011 07:28 PM, Avi Kivity wrote:
> On 05/11/2011 05:44 AM, Xiao Guangrong wrote:
>> Simply return from the kvm_mmu_pte_write path if no shadow page is
>> write-protected, so that we avoid walking all shadow pages and taking
>> mmu-lock.
>>
>> @@ -1038,8 +1038,10 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
>>       hlist_del(&sp->hash_link);
>>       list_del(&sp->link);
>>       free_page((unsigned long)sp->spt);
>> -    if (!sp->role.direct)
>> +    if (!sp->role.direct) {
>>           free_page((unsigned long)sp->gfns);
>> +        atomic_dec(&kvm->arch.indirect_shadow_pages);
>> +    }
>>       kmem_cache_free(mmu_page_header_cache, sp);
>>       kvm_mod_used_mmu_pages(kvm, -1);
>>   }
>> @@ -1536,6 +1538,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>>               kvm_sync_pages(vcpu, gfn);
>>
>>           account_shadowed(vcpu->kvm, gfn);
>> +        atomic_inc(&vcpu->kvm->arch.indirect_shadow_pages);
>>       }
> 
> Better in account_shadowed()/unaccount_shadowed(), no?
> 

Yes, will fix. Thanks for the reminder!
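
For reference, a rough sketch of what folding the counter into the accounting
helpers might look like (a hypothetical illustration of Avi's suggestion, not
the actual v3 posting; the existing bodies of account_shadowed() and
unaccount_shadowed() are elided):

	static void account_shadowed(struct kvm *kvm, gfn_t gfn)
	{
		/* ... existing write-protect accounting ... */
		atomic_inc(&kvm->arch.indirect_shadow_pages);
	}

	static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
	{
		/* ... existing accounting teardown ... */
		atomic_dec(&kvm->arch.indirect_shadow_pages);
	}

With that change, the open-coded atomic_inc()/atomic_dec() in
kvm_mmu_get_page() and kvm_mmu_free_page() in this v2 would be dropped,
keeping the counter updates next to the rest of the write-protect accounting.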


Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d2ac8e2..d2e5fb8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -442,6 +442,7 @@  struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	atomic_t invlpg_counter;
+	atomic_t indirect_shadow_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2841805..7e6117dc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1038,8 +1038,10 @@  static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
-	if (!sp->role.direct)
+	if (!sp->role.direct) {
 		free_page((unsigned long)sp->gfns);
+		atomic_dec(&kvm->arch.indirect_shadow_pages);
+	}
 	kmem_cache_free(mmu_page_header_cache, sp);
 	kvm_mod_used_mmu_pages(kvm, -1);
 }
@@ -1536,6 +1538,7 @@  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			kvm_sync_pages(vcpu, gfn);
 
 		account_shadowed(vcpu->kvm, gfn);
+		atomic_inc(&vcpu->kvm->arch.indirect_shadow_pages);
 	}
 	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
 		vcpu->arch.mmu.prefetch_page(vcpu, sp);
@@ -3233,6 +3236,13 @@  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int level, npte, invlpg_counter, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
+	/*
+	 * If we don't have indirect shadow pages, it means no page is
+	 * write-protected, so we can simply exit.
+	 */
+	if (!atomic_read(&vcpu->kvm->arch.indirect_shadow_pages))
+		return;
+
 	zap_page = remote_flush = local_flush = false;
 	offset = offset_in_page(gpa);