@@ -462,11 +462,16 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct kvm_mmu_page *sp = NULL;
+	struct kvm_mmu_page *sp = NULL, *s;
 	struct kvm_shadow_walk_iterator iterator;
+	struct hlist_head *bucket;
+	struct hlist_node *node, *tmp;
 	gfn_t gfn = -1;
 	u64 *sptep = NULL, gentry;
 	int invlpg_counter, level, offset = 0, need_flush = 0;
+	unsigned index;
+	bool live = false;
+	union kvm_mmu_page_role role;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -480,7 +485,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			if (!sp->unsync)
 				break;
-
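+			/*
+			 * Remember this page's role so it can be
+			 * re-validated after mmu_lock is dropped.
+			 */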
+			role = sp->role;
 			WARN_ON(level != PT_PAGE_TABLE_LEVEL);
 			shift = PAGE_SHIFT -
 				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
@@ -519,10 +524,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	mmu_guess_page_from_pte_write(vcpu, gfn_to_gpa(gfn) + offset, gentry);
 	spin_lock(&vcpu->kvm->mmu_lock);
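+	/*
+	 * mmu_lock was dropped while the guest pte was read, so 'sp' may
+	 * already have been zapped and recycled.  Walk the hash bucket for
+	 * this gfn and only proceed if the same page is still present with
+	 * the same gfn and role.
+	 */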
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, tmp, bucket, hash_link)
+		if (s == sp) {
+			if (s->gfn == gfn && s->role.word == role.word)
+				live = true;
+			break;
+		}
+
+	if (!live)
+		goto unlock_exit;
+
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter) {
 		++vcpu->kvm->stat.mmu_pte_updated;
 		FNAME(update_pte)(vcpu, sp, sptep, &gentry);
 	}
+unlock_exit:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	mmu_release_page_from_pte_write(vcpu);
 }