@@ -315,21 +315,19 @@ static void set_spte_atomic(u64 *sptep, u64 new_spte)
pfn = spte_to_pfn(old_spte);
if (old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
- if (is_writable_pte(old_spte))
+
+ if ((shadow_dirty_mask && (old_spte & shadow_dirty_mask)) ||
+ (!shadow_dirty_mask && is_writable_pte(old_spte)))
kvm_set_pfn_dirty(pfn);
}

static void update_spte(u64 *sptep, u64 new_spte)
{
- u64 old_spte;
-
- if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) {
+ if ((!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) &&
+ (!shadow_dirty_mask || (new_spte & shadow_dirty_mask)))
__set_spte(sptep, new_spte);
- } else {
- old_spte = __xchg_spte(sptep, new_spte);
- if (old_spte & shadow_accessed_mask)
- mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
- }
+ else
+ set_spte_atomic(sptep, new_spte);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
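
The two predicates above are easier to see outside the diff. Below is a minimal user-space sketch of them, assuming hypothetical bit positions; in KVM the real masks are configured through kvm_mmu_set_mask_ptes(), and a mask of zero means the hardware does not maintain that bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITABLE (1ull << 1)			/* hypothetical bit layout */
static uint64_t shadow_accessed_mask = 1ull << 5;	/* hypothetical */
static uint64_t shadow_dirty_mask    = 1ull << 6;	/* hypothetical */

/* The dirty test set_spte_atomic() now applies: trust the hardware
 * dirty bit when one exists, otherwise keep the old conservative
 * "writable implies possibly dirty" guess. */
static bool spte_was_dirty(uint64_t old_spte)
{
	if (shadow_dirty_mask)
		return old_spte & shadow_dirty_mask;
	return old_spte & PTE_WRITABLE;
}

/* The update_spte() fast path: a plain (non-atomic) write is safe only
 * if the new spte keeps every hardware-maintained bit whose clearing
 * we would otherwise race against and lose. */
static bool can_set_nonatomically(uint64_t new_spte)
{
	return (!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) &&
	       (!shadow_dirty_mask || (new_spte & shadow_dirty_mask));
}

int main(void)
{
	printf("%d\n", spte_was_dirty(PTE_WRITABLE));			/* 0 */
	printf("%d\n", spte_was_dirty(PTE_WRITABLE | (1ull << 6)));	/* 1 */
	printf("%d\n", can_set_nonatomically((1ull << 5) | (1ull << 6))); /* 1 */
	printf("%d\n", can_set_nonatomically(1ull << 5));		/* 0 */
	return 0;
}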
@@ -745,7 +743,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
}
spte = rmap_next(kvm, rmapp, spte);
}
- if (write_protected) {
+ if (!shadow_dirty_mask && write_protected) {
pfn_t pfn;

spte = rmap_next(kvm, rmapp, NULL);
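
With a hardware dirty bit, rmap_write_protect() no longer has to assume that every writable spte dirtied its page: the bit is harvested by set_spte_atomic() whenever the spte is changed. A sketch of the changed tail, with set_pfn_dirty() standing in for kvm_set_pfn_dirty():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;
static uint64_t shadow_dirty_mask;	/* 0: no hardware dirty tracking */

static void set_pfn_dirty(pfn_t pfn)	/* stub for kvm_set_pfn_dirty() */
{
	printf("pfn %llu marked dirty\n", (unsigned long long)pfn);
}

/* Only the mask-less case still needs the conservative "we just
 * write-protected something writable, so assume it is dirty" fallback. */
static void finish_write_protect(pfn_t pfn, bool write_protected)
{
	if (!shadow_dirty_mask && write_protected)
		set_pfn_dirty(pfn);
}

int main(void)
{
	finish_write_protect(42, true);		/* no hw bit: marks dirty */
	shadow_dirty_mask = 1ull << 6;		/* hypothetical dirty bit */
	finish_write_protect(42, true);		/* hw bit: nothing to do */
	return 0;
}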
@@ -1879,9 +1877,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
* whether the guest actually used the pte (in order to detect
* demand paging).
*/
- spte = shadow_base_present_pte | shadow_dirty_mask;
+ spte = shadow_base_present_pte;
if (!speculative)
- spte |= shadow_accessed_mask;
+ spte |= shadow_accessed_mask | shadow_dirty_mask;
if (!dirty)
pte_access &= ~ACC_WRITE_MASK;
if (pte_access & ACC_EXEC_MASK)
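
The initialization change is the mirror image of the accessed-bit trick the comment describes: a speculative spte now starts with both accessed and dirty clear, so either bit later found set must have been set by hardware and shows the guest really used (or wrote) the page. A toy version, with hypothetical constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t shadow_base_present_pte = 1ull << 11;	/* hypothetical */
static const uint64_t shadow_accessed_mask    = 1ull << 5;	/* hypothetical */
static const uint64_t shadow_dirty_mask       = 1ull << 6;	/* hypothetical */

/* Non-speculative mappings pre-set both bits, as in the hunk above;
 * speculative ones leave them for the hardware to set. */
static uint64_t initial_spte(bool speculative)
{
	uint64_t spte = shadow_base_present_pte;

	if (!speculative)
		spte |= shadow_accessed_mask | shadow_dirty_mask;
	return spte;
}

int main(void)
{
	printf("speculative:     %#llx\n",
	       (unsigned long long)initial_spte(true));
	printf("non-speculative: %#llx\n",
	       (unsigned long long)initial_spte(false));
	return 0;
}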
@@ -2007,7 +2005,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
} else {
- if (was_writable)
+ if (!shadow_dirty_mask && was_writable)
kvm_release_pfn_dirty(pfn);
else
kvm_release_pfn_clean(pfn);
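
The release path follows the same rule: "was writable" forces kvm_release_pfn_dirty() only when there is no hardware dirty bit, since with one the dirty state was already transferred when the old spte was exchanged out. A sketch with stubs for the two release helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;
static uint64_t shadow_dirty_mask;	/* 0: no hardware dirty tracking */

static void release_dirty(pfn_t pfn)	/* stub for kvm_release_pfn_dirty() */
{
	printf("pfn %llu released dirty\n", (unsigned long long)pfn);
}

static void release_clean(pfn_t pfn)	/* stub for kvm_release_pfn_clean() */
{
	printf("pfn %llu released clean\n", (unsigned long long)pfn);
}

/* Drop path from mmu_set_spte(): without a hardware bit, writable means
 * possibly dirty; with one, set_spte_atomic() already did the work. */
static void drop_spte_pfn(pfn_t pfn, bool was_writable)
{
	if (!shadow_dirty_mask && was_writable)
		release_dirty(pfn);
	else
		release_clean(pfn);
}

int main(void)
{
	drop_spte_pfn(42, true);		/* assume dirty */
	shadow_dirty_mask = 1ull << 6;		/* hypothetical dirty bit */
	drop_spte_pfn(42, true);		/* already harvested: clean */
	return 0;
}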