[7/9] KVM: x86/mmu: Handle "error PFNs" in kvm_faultin_pfn()

Message ID 20220815230110.2266741-8-dmatlack@google.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Always enable the TDP MMU when TDP is enabled

Commit Message

David Matlack Aug. 15, 2022, 11:01 p.m. UTC
Handle "error PFNs" directly in kvm_faultin_pfn() rather than relying on
the caller to invoke handle_abnormal_pfn() after kvm_faultin_pfn().
Opportunistically rename kvm_handle_bad_page() to kvm_handle_error_pfn()
to make it more consistent with e.g. is_error_pfn().
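
To illustrate, the tail of kvm_faultin_pfn() after this patch looks roughly
like the following sketch, abbreviated from the diff below (the elided body
is unchanged; the early-return paths are converted to "goto out"):

	static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
	{
		/* ... resolve fault->pfn via __gfn_to_pfn_memslot() ... */

	out:
		/* The pfn is invalid, report the error! */
		if (unlikely(is_error_pfn(fault->pfn)))
			return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);

		return RET_PF_CONTINUE;
	}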

The reason for this change is to reduce the number of things being
handled in handle_abnormal_pfn(), which is currently a grab bag of edge
conditions (the other being updating the vCPU MMIO cache).
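
With the error-PFN check hoisted out, handle_abnormal_pfn() is left with
just the no-slot (MMIO) case, roughly (a sketch based on the second hunk
below; the body of the if-block is elided):

	static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
				       unsigned int access)
	{
		if (unlikely(!fault->slot)) {
			gva_t gva = fault->is_tdp ? 0 : fault->addr;

			/* ... cache the MMIO info for this gva/gfn ... */
		}

		return RET_PF_CONTINUE;
	}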

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6613ae387e1b..36960ea0d4ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3134,7 +3134,7 @@  static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 {
 	/*
 	 * Do not cache the mmio info caused by writing the readonly gfn
@@ -3155,10 +3155,6 @@  static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 			       unsigned int access)
 {
-	/* The pfn is invalid, report the error! */
-	if (unlikely(is_error_pfn(fault->pfn)))
-		return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
-
 	if (unlikely(!fault->slot)) {
 		gva_t gva = fault->is_tdp ? 0 : fault->addr;
 
@@ -4144,7 +4140,7 @@  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 			fault->slot = NULL;
 			fault->pfn = KVM_PFN_NOSLOT;
 			fault->map_writable = false;
-			return RET_PF_CONTINUE;
+			goto out;
 		}
 		/*
 		 * If the APIC access page exists but is disabled, go directly
@@ -4162,7 +4158,7 @@  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 					  fault->write, &fault->map_writable,
 					  &fault->hva);
 	if (!async)
-		return RET_PF_CONTINUE; /* *pfn has correct page already */
+		goto out; /* *pfn has correct page already */
 
 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
@@ -4178,6 +4174,11 @@  static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
 					  fault->write, &fault->map_writable,
 					  &fault->hva);
+
+out:
+	if (unlikely(is_error_pfn(fault->pfn)))
+		return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
+
 	return RET_PF_CONTINUE;
 }