
[4/8] KVM: MMU: Introduce for_each_gfn_indirect_valid_sp_safe macro

Message ID 20130123191517.d9bdb432.yoshikawa_takuya_b1@lab.ntt.co.jp
State: New, archived

Commit Message

Takuya Yoshikawa Jan. 23, 2013, 10:15 a.m. UTC
This is a preparation for moving hlist_del(&sp->hash_link) from
kvm_mmu_isolate_page() to kvm_mmu_prepare_zap_page().

All for_each_gfn_indirect_valid_sp loops whose bodies contain a function
call that can reach kvm_mmu_prepare_zap_page(), and that do not break out
of the loop right after that call, are converted to the new macro.
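
For illustration only, not part of the patch: below is a minimal,
self-contained userspace C sketch of the pattern described above.  A plain
"safe" walk caches the next node before the loop body runs, but that cache
goes stale if the body (or a callee) also deletes the cached node; handing
the callee a saved-position structure lets it fix the cache up.  The
struct next_pos, zap_one() and zap_with_children() names are hypothetical
stand-ins for sp_next_pos and kvm_mmu_prepare_zap_page().

#include <stdio.h>
#include <stdlib.h>

struct node {
	int gfn;
	struct node *next;
};

/* Saved walk position, analogous to sp_next_pos: callees that delete
 * list entries fix up ->next if they delete the cached next node. */
struct next_pos {
	struct node *next;
};

static struct node *head;

/* Unlink and free @victim; if it is the walker's cached next node,
 * advance the cached position first so the walk stays valid. */
static void zap_one(struct node *victim, struct next_pos *npos)
{
	struct node **pp;

	if (npos->next == victim)
		npos->next = victim->next;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
	}
	free(victim);
}

/* Simulates a callee that zaps more than the entry it was handed,
 * possibly including the walker's cached next node. */
static void zap_with_children(struct node *victim, struct next_pos *npos)
{
	if (victim->next)
		zap_one(victim->next, npos);
	zap_one(victim, npos);
}

int main(void)
{
	struct node *n;
	struct next_pos npos;
	int i;

	for (i = 0; i < 5; i++) {
		n = malloc(sizeof(*n));
		n->gfn = i;
		n->next = head;
		head = n;
	}

	/* "Safe" walk: cache the next node before running the body, but
	 * also let the body patch that cache via &npos. */
	for (n = head, npos.next = n ? n->next : NULL; n;
	     n = npos.next, npos.next = n ? n->next : NULL) {
		if (n->gfn == 4)
			zap_with_children(n, &npos);
	}

	/* Without the npos fix-up the walk would have stepped through a
	 * freed node; with it, the remaining entries are 2, 1, 0. */
	for (n = head; n; n = n->next)
		printf("gfn %d\n", n->gfn);
	return 0;
}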

Note: ignored the following checkpatch report:
  ERROR: Macros with complex values should be enclosed in parenthesis

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
 arch/x86/kvm/mmu.c |   27 +++++++++++++++++++++------
 1 files changed, 21 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a48533..d5bf373 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1677,6 +1677,18 @@  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	for_each_gfn_sp(_kvm, _sp, _gfn, _pos)				\
 		if (!(_sp)->role.direct && !(_sp)->role.invalid)
 
+/*
+ * Used for zapping mmu pages while traversing the mmu page hash list.
+ * If the loop body deletes entries in a way that may invalidate the next
+ * node cached by hlist_for_each_entry_safe, users must update @_n so that
+ * it points to the new valid next node.
+ */
+#define for_each_gfn_indirect_valid_sp_safe(_kvm, _sp, _gfn, _pos, _n)	\
+	hlist_for_each_entry_safe(_sp, _pos, _n,			\
+	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+		if (((_sp)->gfn == (_gfn)) &&				\
+		    !(_sp)->role.direct && !(_sp)->role.invalid)
+
 /* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   struct list_head *invalid_list, bool clear_unsync)
@@ -1729,10 +1741,11 @@  static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
 	struct hlist_node *node;
+	struct sp_next_pos npos;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp_safe(vcpu->kvm, s, gfn, node, npos.hn) {
 		if (!s->unsync)
 			continue;
 
@@ -1741,7 +1754,7 @@  static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
 			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s,
-						 &invalid_list, NULL);
+						 &invalid_list, &npos);
 			continue;
 		}
 		flush = true;
@@ -2176,17 +2189,18 @@  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
+	struct sp_next_pos npos;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp_safe(kvm, sp, gfn, node, npos.hn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
-		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, NULL);
+		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &npos);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
@@ -3966,6 +3980,7 @@  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
+	struct sp_next_pos npos;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3996,11 +4011,11 @@  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp_safe(vcpu->kvm, sp, gfn, node, npos.hn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-							&invalid_list, NULL);
+							&invalid_list, &npos);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}