[2/2] KVM MMU: fix race in invlpg code

Message ID: 4BE162B9.201@cn.fujitsu.com
State: New, archived

Commit Message

Xiao Guangrong, May 5, 2010, 12:21 p.m. UTC
(No commit message text is recorded in the archive for this patch.)

Patch

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 624b38f..13ea675 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -462,11 +462,16 @@  out_unlock:
 
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct kvm_mmu_page *sp = NULL;
+	struct kvm_mmu_page *sp = NULL, *s;
 	struct kvm_shadow_walk_iterator iterator;
+	struct hlist_head *bucket;
+	struct hlist_node *node, *tmp;
 	gfn_t gfn = -1;
 	u64 *sptep = NULL, gentry;
 	int invlpg_counter, level, offset = 0, need_flush = 0;
+	unsigned index;
+	bool live = false;
+	union kvm_mmu_page_role role;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 
@@ -480,7 +485,7 @@  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 			if (!sp->unsync)
 				break;
-
+			role = sp->role;
 			WARN_ON(level != PT_PAGE_TABLE_LEVEL);
 			shift = PAGE_SHIFT -
 				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
@@ -519,10 +524,23 @@  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 	mmu_guess_page_from_pte_write(vcpu, gfn_to_gpa(gfn) + offset, gentry);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, tmp, bucket, hash_link)
+		if (s == sp) {
+			if (s->gfn == gfn && s->role.word == role.word)
+				live = true;
+			break;
+		}
+
+	if (!live)
+		goto unlock_exit;
+
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter) {
 		++vcpu->kvm->stat.mmu_pte_updated;
 		FNAME(update_pte)(vcpu, sp, sptep, &gentry);
 	}
+unlock_exit:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	mmu_release_page_from_pte_write(vcpu);
 }
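
For readers following the change: by the time mmu_lock is re-taken after mmu_guess_page_from_pte_write(), the shadow page that was cached in 'sp' earlier in the function may already have been freed, and its memory possibly reused for a different kvm_mmu_page. The new hunk therefore re-walks the mmu_page_hash bucket for 'gfn' and only calls FNAME(update_pte) if the cached pointer is still present with an unchanged gfn and role; otherwise it jumps to unlock_exit. Below is a minimal user-space sketch of that re-validate-after-relock pattern. It is not kernel code: the types and function names are invented for illustration and are not KVM or kernel APIs.

/*
 * Minimal user-space sketch of the re-validation pattern added by the
 * patch.  NOT kernel code: 'struct entry', 'table_head',
 * 'entry_still_live' and 'update_with_revalidation' are invented names
 * used only to illustrate re-looking up a cached pointer after a lock
 * has been dropped and re-acquired.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	unsigned long key;	/* stands in for sp->gfn */
	unsigned long role;	/* stands in for sp->role.word */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table_head;	/* a single "hash bucket" */

/*
 * Walk the bucket and confirm the cached pointer is still present and
 * still describes the same logical object; this mirrors the
 * hlist_for_each_entry_safe() walk introduced by the patch.
 */
static bool entry_still_live(struct entry *cached,
			     unsigned long key, unsigned long role)
{
	struct entry *e;

	for (e = table_head; e; e = e->next)
		if (e == cached)
			return e->key == key && e->role == role;

	return false;	/* cached pointer is no longer in the table */
}

static void update_with_revalidation(struct entry *cached,
				     unsigned long key, unsigned long role)
{
	/* Imagine the lock was dropped for sleeping work, then re-taken. */
	pthread_mutex_lock(&table_lock);
	if (!entry_still_live(cached, key, role)) {
		pthread_mutex_unlock(&table_lock);
		return;		/* object was freed or reused: do not touch it */
	}
	printf("entry %lu still live, safe to update\n", cached->key);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct entry e = { .next = NULL, .key = 42, .role = 1 };

	table_head = &e;
	update_with_revalidation(&e, 42, 1);
	return 0;
}

Checking both the pointer and the identity fields (here 'key' and 'role', standing in for sp->gfn and sp->role.word) matters because pointer equality alone can match a recycled object that no longer describes the same guest page table.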