--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -645,6 +645,7 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu);
 int kvm_read_guest_page_tdp(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 *error);
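kvm_propagate_fault() becomes the one entry point for re-injecting a fault after a guest-virtual-to-guest-physical translation has failed: instead of each call site passing its own (often approximate) address and error code, the helper replays whatever the failing walk recorded in vcpu->arch.fault_address and vcpu->arch.fault_error_code (see the x86.c hunk below).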
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1336,7 +1336,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	addr = dt.address + index * 8;
 	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+		kvm_propagate_fault(ctxt->vcpu);
 	return ret;
 }
@@ -1362,7 +1362,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	addr = dt.address + index * 8;
 	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+		kvm_propagate_fault(ctxt->vcpu);
 	return ret;
 }
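These two descriptor accesses rely on the failing walk having recorded the fault before read_std()/write_std() returned X86EMUL_PROPAGATE_FAULT. A minimal sketch of the assumed recording side, reusing the field names from the helper added below (record_fault_on_walk() and walker() are hypothetical names, standing in for the real gva-to-gpa paths this series touches):

	/* Sketch only: a failing translation stashes the faulting address
	 * and error code on the vcpu before reporting UNMAPPED_GVA, so
	 * kvm_propagate_fault() can replay them later. */
	static gpa_t record_fault_on_walk(struct kvm_vcpu *vcpu, gva_t gva,
					  u32 *error)
	{
		gpa_t gpa = walker(vcpu, gva, error);	/* hypothetical walk */

		if (gpa == UNMAPPED_GVA) {
			vcpu->arch.fault_address    = gva;
			vcpu->arch.fault_error_code = *error;
		}
		return gpa;
	}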
@@ -2165,7 +2165,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2175,7 +2175,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			     &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2183,7 +2183,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2196,7 +2196,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
				     ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			kvm_propagate_fault(ctxt->vcpu);
 			return ret;
 		}
 	}
@@ -2304,7 +2304,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2314,7 +2314,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			     &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2322,7 +2322,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		kvm_propagate_fault(ctxt->vcpu);
 		return ret;
 	}
@@ -2335,7 +2335,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
				     ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			kvm_propagate_fault(ctxt->vcpu);
 			return ret;
 		}
 	}
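A note on the FIXMEs above: they predate this patch, when the injected fault address was old_tss_base or new_tss_base, at best an approximation of the linear address that actually faulted during the TSS access. With kvm_propagate_fault() the injected address is the one the failing walk recorded, so, assuming the walk records the exact faulting address, these paths now report a more precise address even though the comments are left in place.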
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -47,6 +47,7 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_NESTED_MASK (1U << 31)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
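Only the low bits of a #PF error code are architecturally defined (present, write, user, reserved-bit and fetch: the PFERR_* masks above); hardware never sets bit 31, so it is free to carry KVM-internal state. Here it marks a fault raised while translating through the L1 (nested) page tables rather than by the L2 walk itself, and kvm_propagate_fault() strips it again before injecting anything, so no guest ever sees it. The tagging side might look like this hypothetical helper (not part of the patch):

	/* Hypothetical: tag an error code produced during the nested
	 * (L1) part of a walk.  Bit 31 never collides with an
	 * architectural error-code bit. */
	static inline u32 pferr_tag_nested(u32 error_code)
	{
		return error_code | PFERR_NESTED_MASK;
	}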
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -324,6 +324,22 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
+void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long address;
+	u32 nested, error;
+
+	address = vcpu->arch.fault_address;
+	error   = vcpu->arch.fault_error_code;
+	nested  = error &  PFERR_NESTED_MASK;
+	error   = error & ~PFERR_NESTED_MASK;
+
+	if (vcpu->arch.mmu.nested && !nested)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, address, error);
+	else
+		vcpu->arch.mmu.inject_page_fault(vcpu, address, error);
+}
+
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.nmi_pending = 1;
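The dispatch reduces to a small table:

	mmu.nested   error & PFERR_NESTED_MASK   injected through
	----------   -------------------------   ----------------------------
	0            (don't care)                mmu.inject_page_fault
	1            0                           nested_mmu.inject_page_fault
	1            1                           mmu.inject_page_fault

With nested paging active, an untagged fault means the L2 page-table walk itself failed, so the #PF belongs to the L2 guest and is delivered through the nested MMU's handler; a tagged fault happened while translating through L1's page tables and is handed back to L1 instead. Without nesting the distinction disappears and the normal handler is used.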
@@ -3338,7 +3354,7 @@ static int emulator_read_emulated(unsigned long addr,
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
 
 	if (gpa == UNMAPPED_GVA) {
-		kvm_inject_page_fault(vcpu, addr, error_code);
+		kvm_propagate_fault(vcpu);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
@@ -3392,7 +3408,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
 
 	if (gpa == UNMAPPED_GVA) {
-		kvm_inject_page_fault(vcpu, addr, error_code);
+		kvm_propagate_fault(vcpu);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
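After this conversion the error_code filled in by kvm_mmu_gva_to_gpa_read()/kvm_mmu_gva_to_gpa_write() in these two paths is no longer read: the authoritative copy is the one the failing walk recorded on the vcpu, which kvm_propagate_fault() replays. The now write-only locals look like candidates for removal once every caller has been converted.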