diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f9b7e3a7370f..e14b84d2f55b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4386,7 +4386,9 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
	 * fault handler, and so KVM must (somewhat) speculatively mark the
	 * folio dirty if KVM could locklessly make the SPTE writable.
	 */
-	if (!fault->map_writable || r == RET_PF_RETRY)
+	if (r == RET_PF_RETRY)
+		kvm_release_page_unused(fault->refcounted_page);
+	else if (!fault->map_writable)
		kvm_release_page_clean(fault->refcounted_page);
	else
		kvm_release_page_dirty(fault->refcounted_page);
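
The change splits the RET_PF_RETRY case out ahead of the map_writable check, so a retried fault no longer shares the kvm_release_page_clean() path. Below is a minimal user-space sketch of the resulting branch ordering; the release_*() helpers are hypothetical stand-ins, assumed (not taken from the patch) to mirror the accessed/dirty side effects of kvm_release_page_unused/clean/dirty, and the struct is trimmed to the one field the logic needs.

/*
 * Sketch of the release ordering after the patch: a RET_PF_RETRY result is
 * handled before map_writable is ever consulted, so a retried fault releases
 * the page "unused" instead of "clean".
 */
#include <stdbool.h>
#include <stdio.h>

enum fault_result { RET_PF_FIXED, RET_PF_RETRY };

struct fault { bool map_writable; };

/* Hypothetical stand-ins for kvm_release_page_{unused,clean,dirty}(). */
static void release_unused(void) { puts("released unused (no accessed/dirty update)"); }
static void release_clean(void)  { puts("released clean"); }
static void release_dirty(void)  { puts("released dirty"); }

static void finish_page_fault(struct fault *fault, enum fault_result r)
{
	if (r == RET_PF_RETRY)
		release_unused();
	else if (!fault->map_writable)
		release_clean();
	else
		release_dirty();
}

int main(void)
{
	struct fault writable = { .map_writable = true };

	/*
	 * Before the patch, the retry case was folded into the "clean"
	 * branch; now it takes its own "unused" path, while a fixed,
	 * writable fault still lands on the "dirty" path.
	 */
	finish_page_fault(&writable, RET_PF_RETRY);
	finish_page_fault(&writable, RET_PF_FIXED);
	return 0;
}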