@@ -126,6 +126,34 @@ struct shared_msr_entry {
};
/*
+ * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
+ * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
+ * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
+ * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
+ * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. More
+ * than one of these structures may exist, if L1 runs multiple L2 guests.
+ * nested_vmx_run() will use the data here to build a VMCS for the underlying
+ * hardware which will be used to run L2.
+ * This structure is packed in order to preserve the binary content after live
+ * migration. If there are changes in the content or layout, VMCS12_REVISION
+ * must be changed.
+ */
+struct __packed vmcs12 {
+	/*
+	 * According to the Intel spec, a VMCS region must start with the
+	 * following two fields. Then follow implementation-specific data.
+	 */
+	u32 revision_id;
+	u32 abort;
+};
+
+/*
+ * VMCS12_REVISION is an arbitrary id that should be changed if the content or
+ * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
+ * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+
+/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu. For example,
* the current VMCS set by L1, a list of the VMCSs used to run the active
@@ -134,6 +162,12 @@ struct shared_msr_entry {
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
+
+	/*
+	 * The guest-physical address of the current VMCS L1 keeps for L2.
+	 * -1ull is the sentinel for "no VMCS loaded" (set at vcpu creation,
+	 * checked before releasing the page below).
+	 */
+	gpa_t current_vmptr;
+	/* The pinned host page backing current_vmptr */
+	struct page *current_vmcs12_page;
+	/* Kernel mapping of that page (kunmap'ed when released) */
+	struct vmcs12 *current_vmcs12;
};
struct vcpu_vmx {
@@ -197,6 +231,21 @@ static inline struct vcpu_vmx *to_vmx(st
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+/*
+ * Translate a guest-physical address into a pinned host page, for accessing
+ * guest-memory structures (such as a vmcs12) that L1 maintains.
+ * Returns NULL if the address is not backed by a usable guest page.
+ * On success the caller owns a page reference and must drop it with
+ * nested_release_page().
+ */
+static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
+
+	if (is_error_page(page)) {
+		kvm_release_page_clean(page);
+		return NULL;
+	}
+	return page;
+}
+
+/*
+ * Drop the reference taken by nested_get_page(). Released dirty because
+ * the guest structure behind it may have been modified through the mapping.
+ */
+static void nested_release_page(struct page *page)
+{
+	kvm_release_page_dirty(page);
+}
+
static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
@@ -3464,6 +3513,11 @@ static int handle_vmoff(struct kvm_vcpu
to_vmx(vcpu)->nested.vmxon = false;
+	/*
+	 * Release the vmcs12 page cached at VMPTRLD time, and reset the
+	 * tracking fields so a later teardown (vmx_free_vcpu) cannot
+	 * kunmap/release the same page a second time after the guest
+	 * re-enters VMX operation.
+	 */
+	if (to_vmx(vcpu)->nested.current_vmptr != -1ull) {
+		kunmap(to_vmx(vcpu)->nested.current_vmcs12_page);
+		nested_release_page(to_vmx(vcpu)->nested.current_vmcs12_page);
+		to_vmx(vcpu)->nested.current_vmptr = -1ull;
+		to_vmx(vcpu)->nested.current_vmcs12 = NULL;
+	}
+
skip_emulated_instruction(vcpu);
return 1;
}
@@ -4136,6 +4190,10 @@ static void vmx_free_vcpu(struct kvm_vcp
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
+	/*
+	 * Drop the cached vmcs12 mapping if the guest was still in VMX
+	 * operation; after VMXOFF the handle_vmoff path has already
+	 * released it. Use the vmx local computed above.
+	 */
+	if (vmx->nested.vmxon && vmx->nested.current_vmptr != -1ull) {
+		kunmap(vmx->nested.current_vmcs12_page);
+		nested_release_page(vmx->nested.current_vmcs12_page);
+	}
vmx_free_vmcs(vcpu);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
@@ -4201,6 +4259,9 @@ static struct kvm_vcpu *vmx_create_vcpu(
goto free_vmcs;
}
+	/* No vmcs12 is cached until the guest loads one with VMPTRLD. */
+	vmx->nested.current_vmptr = -1ull;
+	vmx->nested.current_vmcs12 = NULL;
+
return &vmx->vcpu;
free_vmcs: