@@ -4250,7 +4250,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
if (r != RET_PF_CONTINUE)
- return r;
+ goto out_release;
r = RET_PF_RETRY;
@@ -4276,6 +4276,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
read_unlock(&vcpu->kvm->mmu_lock);
else
write_unlock(&vcpu->kvm->mmu_lock);
+out_release:
kvm_release_pfn_clean(fault->pfn);
return r;
}
@@ -847,7 +847,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
if (r != RET_PF_CONTINUE)
- return r;
+ goto out_release;
/*
* Do not change pte_access if the pfn is a mmio page, otherwise
@@ -881,6 +881,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
out_unlock:
write_unlock(&vcpu->kvm->mmu_lock);
+out_release:
kvm_release_pfn_clean(fault->pfn);
return r;
}
Currently, in case of an error, these functions return without releasing the pfn that was faulted in earlier. Signed-off-by: Fuad Tabba <tabba@google.com> --- Applies to 6.1-rc1. I think that kvm_release_pfn_clean() has been replaced with kvm_mmu_release_fault() in development branches, but the same issue is still there. --- arch/x86/kvm/mmu/mmu.c | 3 ++- arch/x86/kvm/mmu/paging_tmpl.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) base-commit: 9abf2313adc1ca1b6180c508c25f22f9395cc780