@@ -264,6 +264,13 @@ struct kvm_mmu {
u64 *pae_root;
u64 rsvd_bits_mask[2][4];
+
+ /*
+ * When true, this MMU translates in two-level (nested) mode;
+ * vcpu->arch.nested_mmu must then contain valid state for the
+ * second-level guest.
+ */
+ bool nested;
};
struct kvm_vcpu_arch {
@@ -296,6 +303,7 @@ struct kvm_vcpu_arch {
struct kvm_mmu mmu;
+ /* MMU context used for walks on behalf of the second-level guest */
struct kvm_mmu nested_mmu;
/* only needed in kvm_pv_mmu_op() path, but it's hot so
@@ -2154,6 +2154,18 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *error)
return gpa;
}
+static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *error)
+{
+ u32 access;
+
+ BUG_ON(!vcpu->arch.mmu.nested);
+
+ /* every nested page-table walk is permission-checked as a user-mode write */
+ access = PFERR_WRITE_MASK | PFERR_USER_MASK;
+
+ return vcpu->arch.nested_mmu.gva_to_gpa(vcpu, gpa, access, error);
+}
+
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access, u32 *error)
{
@@ -2476,11 +2488,45 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
return r;
}
+static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ struct kvm_mmu *h_context = &vcpu->arch.mmu;
+
+ g_context->get_cr3 = get_cr3;
+ g_context->translate_gpa = translate_nested_gpa;
+ g_context->inject_page_fault = kvm_inject_page_fault;
+
+ /*
+ * Note the swap: arch.mmu.gva_to_gpa gets the nested variant
+ * (l2_gva -> l1_gpa, a two-level walk), while arch.nested_mmu's
+ * gva_to_gpa is consulted, via translate_nested_gpa, to turn an
+ * l2_gpa into an l1_gpa.
+ */
+ if (!is_paging(vcpu)) {
+ g_context->root_level = 0;
+ h_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
+ } else if (is_long_mode(vcpu)) {
+ g_context->root_level = PT64_ROOT_LEVEL;
+ h_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+ } else if (is_pae(vcpu)) {
+ g_context->root_level = PT32E_ROOT_LEVEL;
+ h_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+ } else {
+ g_context->root_level = PT32_ROOT_LEVEL;
+ h_context->gva_to_gpa = paging32_gva_to_gpa_nested;
+ }
+
+ return 0;
+}
+
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
vcpu->arch.update_pte.pfn = bad_pfn;
- if (tdp_enabled)
+ if (vcpu->arch.mmu.nested)
+ return init_kvm_nested_mmu(vcpu);
+ else if (tdp_enabled)
return init_kvm_tdp_mmu(vcpu);
else
return init_kvm_softmmu(vcpu);