@@ -3225,6 +3225,15 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
WARN_ON(!sp->role.direct);
/*
+ * The original spte cannot be writable: only an spte that satisfies
+ * is_access_track_spte() or spte_can_locklessly_be_made_writable()
+ * is fixed locklessly. For the former the W bit is always cleared;
+ * for the latter there is nothing to fix if the spte is already
+ * writable.
+ */
+ WARN_ON(is_writable_pte(old_spte));
+
+ /*
* Theoretically we could also set dirty bit (and flush TLB) here in
* order to eliminate unnecessary PML logging. See comments in
* set_spte. But fast_page_fault is very unlikely to happen with PML
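
The reasoning in the new comment can be exercised outside the kernel. Below is a minimal standalone sketch, not kernel code: the bit positions and the bodies of is_writable_pte(), is_access_track_spte() and spte_can_locklessly_be_made_writable() are illustrative assumptions that only borrow the names used in the patch, mark_for_access_track() and wrprot_for_dirty_log() are made-up constructors loosely modelled on the behavior the comment describes, and fast_path_would_fix() is a simplified stand-in for the eligibility check in fast_page_fault(). It builds the two kinds of sptes the fast path is allowed to fix and asserts the same condition the new WARN_ON checks.

/*
 * Standalone sketch, not kernel code. Bit layout and helper bodies are
 * illustrative assumptions that only reuse names from the patch.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK   (1ull << 1)   /* hardware W bit (illustrative)        */
#define SPTE_MMU_WRITEABLE (1ull << 61)  /* "restorable W" marker (illustrative) */
#define SPTE_ACC_TRACK     (1ull << 62)  /* access-track marker (illustrative)   */

static bool is_writable_pte(uint64_t spte)
{
	return spte & PT_WRITABLE_MASK;
}

static bool is_access_track_spte(uint64_t spte)
{
	return spte & SPTE_ACC_TRACK;
}

static bool spte_can_locklessly_be_made_writable(uint64_t spte)
{
	return spte & SPTE_MMU_WRITEABLE;
}

/* Per the patch comment, access tracking always clears the W bit. */
static uint64_t mark_for_access_track(uint64_t spte)
{
	return (spte & ~PT_WRITABLE_MASK) | SPTE_ACC_TRACK;
}

/* Dirty-log write protection clears W but records that it may be restored. */
static uint64_t wrprot_for_dirty_log(uint64_t spte)
{
	return (spte & ~PT_WRITABLE_MASK) | SPTE_MMU_WRITEABLE;
}

/* Simplified stand-in for the eligibility check in fast_page_fault(). */
static bool fast_path_would_fix(uint64_t spte, bool write_fault)
{
	if (write_fault && is_writable_pte(spte))
		return false;                   /* spurious fault, nothing to fix */
	if (is_access_track_spte(spte))
		return true;                    /* restore the tracked state      */
	return write_fault && spte_can_locklessly_be_made_writable(spte);
}

int main(void)
{
	uint64_t writable = PT_WRITABLE_MASK;
	uint64_t candidates[] = {
		writable,                        /* untouched, still writable   */
		wrprot_for_dirty_log(writable),  /* write-protected for logging */
		mark_for_access_track(writable), /* marked for access tracking  */
	};

	for (unsigned int i = 0; i < 3; i++) {
		for (int wf = 0; wf <= 1; wf++) {
			if (!fast_path_would_fix(candidates[i], wf))
				continue;
			/* Same condition as WARN_ON(is_writable_pte(old_spte)). */
			assert(!is_writable_pte(candidates[i]));
		}
	}
	printf("old spte is never writable on the lockless fix path\n");
	return 0;
}

In this model the untouched writable spte is rejected as a spurious fault, and both write-protected variants reach the fix with the W bit already cleared, which is why the WARN_ON is expected to never fire.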
@@ -3239,7 +3248,7 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
return false;
- if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
+ if (is_writable_pte(new_spte)) {
/*
* The gfn of direct spte is stable since it is
* calculated by sp->gfn.