[v3] KVM: x86/MMU: Do not check unsync status for root SP.

Message ID 20210209170111.4770-1-yu.c.zhang@linux.intel.com (mailing list archive)
State New, archived
Series [v3] KVM: x86/MMU: Do not check unsync status for root SP.

Commit Message

Yu Zhang Feb. 9, 2021, 5:01 p.m. UTC
In the shadow page table, only leaf SPs may be marked as unsync;
for non-leaf SPs, the number of unsynced children is stored in
unsync_children instead. Therefore, in kvm_mmu_sync_roots(),
sp->unsync is always zero for the root SP and there is no need
to check it. Remove the check, and add a warning inside
mmu_sync_children() to assert that the flag is used properly.

While at it, move the warning from mmu_need_write_protect()
to kvm_unsync_page().

Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
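
For context, a minimal standalone sketch of the invariant this patch relies
on (made-up names, not the kernel code): only leaf (4K) SPs ever have
sp->unsync set, while their parents merely count unsynced children in
unsync_children, so a root SP's unsync flag can never be set.

#include <stdbool.h>

/* Hypothetical, simplified stand-in for struct kvm_mmu_page. */
struct sp_sketch {
	struct sp_sketch *parent;	/* one parent, for simplicity */
	bool unsync;			/* set only on leaf (4K) SPs */
	unsigned int unsync_children;	/* non-leaf SPs count unsynced children */
};

/* Unsyncing a leaf SP only bumps the counters on its ancestors. */
static void sketch_mark_unsync(struct sp_sketch *leaf)
{
	leaf->unsync = true;
	for (struct sp_sketch *p = leaf->parent; p; p = p->parent)
		p->unsync_children++;
}

/* Hence a root SP never has unsync set; checking the counter suffices. */
static bool sketch_root_needs_sync(const struct sp_sketch *root)
{
	return root->unsync_children != 0;
}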

Comments

Yu Zhang Feb. 9, 2021, 10:55 a.m. UTC | #1
Sorry, I forgot the change log:

Changes in V3:
- fixed a bug in the warning inside mmu_sync_children().
- commit message changes based on Paolo's suggestion.
- added Co-developed-by: Sean Christopherson <seanjc@google.com>

Changes in V2:
- warnings added based on Sean's suggestion.


On Wed, Feb 10, 2021 at 01:01:11AM +0800, Yu Zhang wrote:
> In the shadow page table, only leaf SPs may be marked as unsync;
> for non-leaf SPs, the number of unsynced children is stored in
> unsync_children instead. Therefore, in kvm_mmu_sync_roots(),
> sp->unsync is always zero for the root SP and there is no need
> to check it. Remove the check, and add a warning inside
> mmu_sync_children() to assert that the flag is used properly.
> 
> While at it, move the warning from mmu_need_write_protect()
> to kvm_unsync_page().
> 
> Co-developed-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 86af58294272..5f482af125b4 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1995,6 +1995,12 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
>  	LIST_HEAD(invalid_list);
>  	bool flush = false;
>  
> +	/*
> +	 * Only 4k SPTEs can directly be made unsync; the parent pages
> +	 * should never be unsync'd.
> +	 */
> +	WARN_ON_ONCE(parent->unsync);
> +
>  	while (mmu_unsync_walk(parent, &pages)) {
>  		bool protected = false;
>  
> @@ -2502,6 +2508,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
>  
>  static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>  {
> +	WARN_ON(sp->role.level != PG_LEVEL_4K);
> +
>  	trace_kvm_mmu_unsync_page(sp);
>  	++vcpu->kvm->stat.mmu_unsync;
>  	sp->unsync = 1;
> @@ -2524,7 +2532,6 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>  		if (sp->unsync)
>  			continue;
>  
> -		WARN_ON(sp->role.level != PG_LEVEL_4K);
>  		kvm_unsync_page(vcpu, sp);
>  	}
>  
> @@ -3406,8 +3413,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
>  		 * mmu_need_write_protect() describe what could go wrong if this
>  		 * requirement isn't satisfied.
>  		 */
> -		if (!smp_load_acquire(&sp->unsync) &&
> -		    !smp_load_acquire(&sp->unsync_children))
> +		if (!smp_load_acquire(&sp->unsync_children))
>  			return;
>  
>  		write_lock(&vcpu->kvm->mmu_lock);
> -- 
> 2.17.1
>
Paolo Bonzini Feb. 10, 2021, 5:14 p.m. UTC | #2
On 09/02/21 18:01, Yu Zhang wrote:
> In the shadow page table, only leaf SPs may be marked as unsync;
> for non-leaf SPs, the number of unsynced children is stored in
> unsync_children instead. Therefore, in kvm_mmu_sync_roots(),
> sp->unsync is always zero for the root SP and there is no need
> to check it. Remove the check, and add a warning inside
> mmu_sync_children() to assert that the flag is used properly.
> 
> While at it, move the warning from mmu_need_write_protect()
> to kvm_unsync_page().
> 
> Co-developed-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
>   arch/x86/kvm/mmu/mmu.c | 12 +++++++++---
>   1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 86af58294272..5f482af125b4 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1995,6 +1995,12 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
>   	LIST_HEAD(invalid_list);
>   	bool flush = false;
>   
> +	/*
> +	 * Only 4k SPTEs can directly be made unsync; the parent pages
> +	 * should never be unsync'd.
> +	 */
> +	WARN_ON_ONCE(parent->unsync);
> +
>   	while (mmu_unsync_walk(parent, &pages)) {
>   		bool protected = false;
>   
> @@ -2502,6 +2508,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
>   
>   static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>   {
> +	WARN_ON(sp->role.level != PG_LEVEL_4K);
> +
>   	trace_kvm_mmu_unsync_page(sp);
>   	++vcpu->kvm->stat.mmu_unsync;
>   	sp->unsync = 1;
> @@ -2524,7 +2532,6 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>   		if (sp->unsync)
>   			continue;
>   
> -		WARN_ON(sp->role.level != PG_LEVEL_4K);
>   		kvm_unsync_page(vcpu, sp);
>   	}
>   
> @@ -3406,8 +3413,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
>   		 * mmu_need_write_protect() describe what could go wrong if this
>   		 * requirement isn't satisfied.
>   		 */
> -		if (!smp_load_acquire(&sp->unsync) &&
> -		    !smp_load_acquire(&sp->unsync_children))
> +		if (!smp_load_acquire(&sp->unsync_children))
>   			return;
>   
>   		write_lock(&vcpu->kvm->mmu_lock);
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 86af58294272..5f482af125b4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1995,6 +1995,12 @@  static void mmu_sync_children(struct kvm_vcpu *vcpu,
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
+	/*
+	 * Only 4k SPTEs can directly be made unsync; the parent pages
+	 * should never be unsync'd.
+	 */
+	WARN_ON_ONCE(parent->unsync);
+
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
 
@@ -2502,6 +2508,8 @@  EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
+	WARN_ON(sp->role.level != PG_LEVEL_4K);
+
 	trace_kvm_mmu_unsync_page(sp);
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
@@ -2524,7 +2532,6 @@  bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 		if (sp->unsync)
 			continue;
 
-		WARN_ON(sp->role.level != PG_LEVEL_4K);
 		kvm_unsync_page(vcpu, sp);
 	}
 
@@ -3406,8 +3413,7 @@  void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 		 * mmu_need_write_protect() describe what could go wrong if this
 		 * requirement isn't satisfied.
 		 */
-		if (!smp_load_acquire(&sp->unsync) &&
-		    !smp_load_acquire(&sp->unsync_children))
+		if (!smp_load_acquire(&sp->unsync_children))
 			return;
 
 		write_lock(&vcpu->kvm->mmu_lock);
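
The lockless fast path above keeps smp_load_acquire() on unsync_children. As
a generic, standalone sketch of the acquire/release publication pattern such
a load relies on (C11 atomics and invented names; not the kernel code, whose
exact ordering argument is spelled out in the in-tree comment visible in the
hunk context above):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int unsync_children;
static int shadow_state;	/* stands in for updates made before publishing */

/* Writer: update state first, then publish with a release store. */
static void writer_mark_child_unsync(void)
{
	shadow_state = 1;
	atomic_fetch_add_explicit(&unsync_children, 1, memory_order_release);
}

/*
 * Reader: an acquire load guarantees that if a non-zero counter is
 * observed, the writer's earlier updates are visible as well; this is
 * the ordering guarantee smp_load_acquire() provides.
 */
static bool reader_can_skip_sync(void)
{
	return atomic_load_explicit(&unsync_children,
				    memory_order_acquire) == 0;
}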