@@ -262,6 +262,8 @@ enum x86_intercept_stage;
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
+#define PFERR_LEVEL_START_BIT 29
+#define PFERR_LEVEL_END_BIT 31
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_GUEST_ENC_BIT 34
@@ -274,6 +276,7 @@ enum x86_intercept_stage;
#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
+#define PFERR_LEVEL_MASK GENMASK_ULL(PFERR_LEVEL_END_BIT, PFERR_LEVEL_START_BIT)
#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_GUEST_ENC_MASK BIT_ULL(PFERR_GUEST_ENC_BIT)
@@ -283,6 +286,8 @@ enum x86_intercept_stage;
PFERR_WRITE_MASK | \
PFERR_PRESENT_MASK)

+#define PFERR_LEVEL(err_code) (((err_code) & PFERR_LEVEL_MASK) >> PFERR_LEVEL_START_BIT)
+
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
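
To make the encoding concrete, here is a small standalone sketch (not part of the patch) that re-states the PFERR_LEVEL macros in plain C and shows a level hint being packed into, and pulled back out of, a 64-bit error code. The userspace main(), the open-coded stand-in for GENMASK_ULL(), and the choice of level 2 (a 2M mapping in KVM's PG_LEVEL_* terms) are illustrative assumptions only; the macro names simply mirror the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the PFERR_LEVEL bits re-stated outside the kernel. */
#define PFERR_LEVEL_START_BIT	29
#define PFERR_LEVEL_END_BIT	31
/* Open-coded stand-in for GENMASK_ULL(31, 29): bits 31..29 set. */
#define PFERR_LEVEL_MASK	(((1ULL << (PFERR_LEVEL_END_BIT - PFERR_LEVEL_START_BIT + 1)) - 1) \
				 << PFERR_LEVEL_START_BIT)
#define PFERR_LEVEL(err_code)	(((err_code) & PFERR_LEVEL_MASK) >> PFERR_LEVEL_START_BIT)

int main(void)
{
	uint64_t error_code = 0;

	/* Hypothetical producer: ask that the mapping not exceed level 2 (2M). */
	error_code |= 2ULL << PFERR_LEVEL_START_BIT;

	/* Consumer side, mirroring kvm_tdp_page_fault(): 0 means "no hint". */
	uint8_t err_level = PFERR_LEVEL(error_code);

	printf("level hint = %u\n", err_level);	/* prints 2 */
	return 0;
}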
@@ -4625,6 +4625,11 @@ bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
+ u8 err_level = PFERR_LEVEL(fault->error_code);
+
+ if (err_level)
+ fault->max_level = min(fault->max_level, err_level);
+
/*
* If the guest's MTRRs may be used to compute the "real" memtype,
* restrict the mapping level to ensure KVM uses a consistent memtype