@@ -47,6 +47,7 @@
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_NESTED_MASK (1U << 31)

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
@@ -154,6 +154,7 @@ walk:
pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
if (pte_gpa == UNMAPPED_GVA) {
+ error |= PFERR_NESTED_MASK;
walker->error_code = error;
return 0;
}
@@ -223,6 +224,7 @@ walk:
pte_gpa = gfn_to_gpa(walker->gfn);
pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
if (pte_gpa == UNMAPPED_GVA) {
+ error |= PFERR_NESTED_MASK;
walker->error_code = error;
return 0;
}
@@ -314,6 +314,19 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

+void kvm_propagate_fault(struct kvm_vcpu *vcpu, unsigned long addr, u32 error_code)
+{
+	u32 nested, error;
+
+	nested = error_code & PFERR_NESTED_MASK;
+	error  = error_code & ~PFERR_NESTED_MASK;
+
+	if (vcpu->arch.mmu.nested && !nested)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, addr, error);
+	else
+		vcpu->arch.mmu.inject_page_fault(vcpu, addr, error);
+}
+
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
vcpu->arch.nmi_pending = 1;
@@ -3546,7 +3559,7 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
if (ret == X86EMUL_PROPAGATE_FAULT)
- kvm_inject_page_fault(vcpu, q, error_code);
+ kvm_propagate_fault(vcpu, q, error_code);
return ret;
}
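
For readers skimming the hunks, a minimal user-space sketch of the tag-and-route idea follows: the walker ORs PFERR_NESTED_MASK into the error code when translate_gpa() fails, and kvm_propagate_fault() strips the tag again and picks the injection callback based on whether it was set. The propagate() helper, the inject_via_*() stubs and main() below are illustration only, not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Error-code bits as defined in the mmu.h hunk above. */
#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK   (1U << 1)
#define PFERR_USER_MASK    (1U << 2)
#define PFERR_NESTED_MASK  (1U << 31)

/* Hypothetical stand-ins for the two inject_page_fault callbacks. */
static void inject_via_nested_mmu(uint32_t error)
{
	printf("nested_mmu.inject_page_fault, error=%#x\n", (unsigned int)error);
}

static void inject_via_mmu(uint32_t error)
{
	printf("mmu.inject_page_fault, error=%#x\n", (unsigned int)error);
}

/*
 * Same decision as kvm_propagate_fault(): the PFERR_NESTED_MASK tag is
 * stripped before injection; an untagged fault raised while a nested MMU
 * is in use goes through the nested context, while a tagged fault (one
 * that came from a failed translate_gpa()) goes through mmu itself.
 */
static void propagate(bool mmu_nested, uint32_t error_code)
{
	uint32_t nested = error_code & PFERR_NESTED_MASK;
	uint32_t error  = error_code & ~PFERR_NESTED_MASK;

	if (mmu_nested && !nested)
		inject_via_nested_mmu(error);
	else
		inject_via_mmu(error);
}

int main(void)
{
	/* Untagged fault, nested MMU active -> nested context. */
	propagate(true, PFERR_PRESENT_MASK | PFERR_WRITE_MASK);
	/* Fault tagged by the page-table walker -> direct context. */
	propagate(true, PFERR_WRITE_MASK | PFERR_NESTED_MASK);
	/* No nested MMU -> always the direct context. */
	propagate(false, PFERR_USER_MASK);
	return 0;
}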