@@ -1810,11 +1810,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	bool need_unsync = false;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+		if (!can_unsync)
+			return 1;
+
 		if (s->role.level != PT_PAGE_TABLE_LEVEL)
 			return 1;
 		if (!need_unsync && !s->unsync) {
-			if (!can_unsync || !oos_shadow)
+			if (!oos_shadow)
 				return 1;
 			need_unsync = true;
 		}
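/*
 * Illustrative aside, not part of the patch: a minimal userspace model of
 * the mmu_need_write_protect() change in the hunk above. struct sp,
 * PT_PAGE_TABLE_LEVEL and oos_shadow are simplified stand-ins for the real
 * kernel state, and need_wp() keeps only the loop logic shown in the diff;
 * the surrounding hash lookup and unsync handling are omitted.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
static const bool oos_shadow = true;

struct sp { int level; bool unsync; };	/* stand-in for kvm_mmu_page */

static int need_wp(const struct sp *sps, size_t n, bool can_unsync, bool patched)
{
	bool need_unsync = false;

	for (size_t i = 0; i < n; i++) {
		const struct sp *s = &sps[i];

		if (patched && !can_unsync)	/* the hoisted check */
			return 1;
		if (s->level != PT_PAGE_TABLE_LEVEL)
			return 1;
		if (!need_unsync && !s->unsync) {
			if ((!patched && !can_unsync) || !oos_shadow)
				return 1;
			need_unsync = true;
		}
	}
	return 0;	/* the real function may also unsync pages here */
}

int main(void)
{
	/* One last-level shadow page that is already marked unsync. */
	struct sp s[] = { { PT_PAGE_TABLE_LEVEL, true } };

	printf("old: %d new: %d\n",
	       need_wp(s, 1, false, false), need_wp(s, 1, false, true));
	/*
	 * Prints "old: 0 new: 1": once any shadow page is tracked for the
	 * gfn, !can_unsync now forces write protection even if that page
	 * is already unsync.
	 */
	return 0;
}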
@@ -1877,15 +1880,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
 			spte &= ~PT_USER_MASK;
-		/*
-		 * Optimization: for pte sync, if spte was writable the hash
-		 * lookup is unnecessary (and expensive). Write protection
-		 * is responsibility of mmu_get_page / kvm_sync_page.
-		 * Same reasoning can be applied to dirty page accounting.
-		 */
-		if (!can_unsync && is_writable_pte(*sptep))
-			goto set_pte;
-
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
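/*
 * Illustrative aside, not part of the patch: a minimal model of the
 * set_spte() hunk above. The removed fast path kept a previously writable
 * spte writable whenever can_unsync was false, trusting mmu_get_page /
 * kvm_sync_page to have write-protected the gfn; with it gone, the
 * decision is always delegated to mmu_need_write_protect(). The helper
 * below reduces that function to the single branch added in the first
 * hunk and is a simplified stand-in, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

static bool need_write_protect(bool has_shadow_page, bool can_unsync)
{
	return has_shadow_page && !can_unsync;
}

static bool marks_readonly(bool old_spte_writable, bool has_shadow_page,
			   bool can_unsync, bool patched)
{
	if (!patched && !can_unsync && old_spte_writable)
		return false;		/* the removed "goto set_pte" */
	return need_write_protect(has_shadow_page, can_unsync);
}

int main(void)
{
	/* Writable old spte, a shadow page for the gfn, unsyncing not allowed. */
	printf("old: %d new: %d\n",
	       marks_readonly(true, true, false, false),
	       marks_readonly(true, true, false, true));
	/* Prints "old: 0 new: 1": the mapping is now marked read only. */
	return 0;
}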