
[7/8] KVM: MMU: Split out free_zapped_mmu_pages() from kvm_mmu_commit_zap_page()

Message ID 20130123191733.ced24b9d.yoshikawa_takuya_b1@lab.ntt.co.jp

Commit Message

Takuya Yoshikawa Jan. 23, 2013, 10:17 a.m. UTC
Just trivial conversions at this point: every caller of
kvm_mmu_commit_zap_page() now calls free_zapped_mmu_pages() right after it.
Some of these calls will be moved out of the protection of the mmu_lock in
the following patch.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
 arch/x86/kvm/mmu.c |   24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)
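
[Editor's note] For readers jumping into the series here, the call pattern
the split produces looks roughly like the sketch below.  This is only a
sketch: free_zapped_mmu_pages() is introduced earlier in the series and its
body is not shown in this patch; the assumption is that it simply drains
invalid_list with kvm_mmu_free_page(), which already unlinks the page as it
frees it.

    /*
     * Sketch, not verbatim series code: assumes kvm_mmu_free_page()
     * removes the page from invalid_list while freeing it.
     */
    static void free_zapped_mmu_pages(struct kvm *kvm,
                                      struct list_head *invalid_list)
    {
            struct kvm_mmu_page *sp, *nsp;

            list_for_each_entry_safe(sp, nsp, invalid_list, link)
                    kvm_mmu_free_page(sp);
    }

    /* Typical call site after this patch: */
    spin_lock(&kvm->mmu_lock);
    kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, NULL);
    kvm_mmu_commit_zap_page(kvm, &invalid_list);  /* remote TLB flush */
    free_zapped_mmu_pages(kvm, &invalid_list);    /* candidate to move
                                                     outside the lock */
    spin_unlock(&kvm->mmu_lock);

Per the commit message, the reason for keeping the two calls separate is
that the commit (and its TLB flush) must stay under mmu_lock, while freeing
the already-unlinked pages does not have to.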

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 97d372a..dd7b455 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1721,8 +1721,10 @@  static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
 	int ret;
 
 	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
-	if (ret)
+	if (ret) {
 		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+		free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
+	}
 
 	return ret;
 }
@@ -1765,6 +1767,8 @@  static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 	}
 
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
+
 	if (flush)
 		kvm_mmu_flush_tlb(vcpu);
 }
@@ -1852,6 +1856,8 @@  static void mmu_sync_children(struct kvm_vcpu *vcpu,
 			mmu_pages_clear_parents(&parents);
 		}
 		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+		free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
+
 		cond_resched_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_pages_init(parent, &parents, &pages);
 	}
@@ -2152,8 +2158,6 @@  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 * page table walks.
 	 */
 	kvm_flush_remote_tlbs(kvm);
-
-	free_zapped_mmu_pages(kvm, invalid_list);
 }
 
 /*
@@ -2181,6 +2185,8 @@  void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list, NULL);
 		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
+		free_zapped_mmu_pages(kvm, &invalid_list);
+
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
 
@@ -2207,6 +2213,7 @@  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &npos);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	free_zapped_mmu_pages(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
 
 	return r;
@@ -2927,6 +2934,7 @@  static void mmu_free_roots(struct kvm_vcpu *vcpu)
 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						 &invalid_list, NULL);
 			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+			free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
 		}
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		spin_unlock(&vcpu->kvm->mmu_lock);
@@ -2946,6 +2954,8 @@  static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
+
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
@@ -4042,7 +4052,10 @@  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 	}
 	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
+
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
+
 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
@@ -4076,7 +4089,9 @@  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list, NULL);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
+
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	free_zapped_mmu_pages(vcpu->kvm, &invalid_list);
 }
 
 static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
@@ -4239,6 +4254,8 @@  restart:
 			goto restart;
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	free_zapped_mmu_pages(kvm, &invalid_list);
+
 	spin_unlock(&kvm->mmu_lock);
 }
 
@@ -4291,6 +4308,7 @@  static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
+		free_zapped_mmu_pages(kvm, &invalid_list);
 
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);