
[5/24] Introduce vmcs12: a VMCS structure for L1

Message ID 201006131225.o5DCP79H012922@rice.haifa.ibm.com
State New, archived

Commit Message

Nadav Har'El June 13, 2010, 12:25 p.m. UTC

Patch

--- .before/arch/x86/kvm/vmx.c	2010-06-13 15:01:28.000000000 +0300
+++ .after/arch/x86/kvm/vmx.c	2010-06-13 15:01:28.000000000 +0300
@@ -117,6 +117,29 @@  struct shared_msr_entry {
 	u64 mask;
 };
 
+#define VMCS12_REVISION 0x11e57ed0
+
+/*
+ * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
+ * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
+ * a VMCS structure (which is opaque to the guest), and vmcs12 is our emulated
+ * VMX's VMCS. This structure is stored in guest memory specified by VMPTRLD,
+ * and accessed by the guest using VMREAD/VMWRITE/VMCLEAR instructions. More
+ * than one of these structures may exist, if L1 runs multiple L2 guests.
+ * nested_vmx_run() will use the data here to build a VMCS for the underlying
+ * hardware which will be used to run L2.
+ * This structure is packed in order to preserve the binary content after live
+ * migration. If there are changes in the content or layout, VMCS12_REVISION
+ * must be changed.
+ */
+struct __attribute__ ((__packed__)) vmcs12 {
+	/* According to the Intel spec, a VMCS region must start with the
+	 * following two fields. Then follow implementation-specific data.
+	 */
+	u32 revision_id;
+	u32 abort;
+};
+
 /* The nested_vmx structure is part of vcpu_vmx, and holds information we need
  * for correct emulation of VMX (i.e., nested VMX) on this vcpu. For example,
  * the current VMCS set by L1, a list of the VMCSs used to run the active
@@ -125,6 +148,11 @@  struct shared_msr_entry {
 struct nested_vmx {
 	/* Has the level1 guest done vmxon? */
 	bool vmxon;
+
+	/* The guest-physical address of the current VMCS L1 keeps for L2 */
+	gpa_t current_vmptr;
+	/* The host-usable pointer to the above. Set by nested_map_current() */
+	struct vmcs12 *current_l2_page;
 };
 
 struct vcpu_vmx {
@@ -188,6 +216,61 @@  static inline struct vcpu_vmx *to_vmx(st
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
+static struct page *nested_get_page(struct kvm_vcpu *vcpu, u64 vmcs_addr)
+{
+	struct page *vmcs_page =
+		gfn_to_page(vcpu->kvm, vmcs_addr >> PAGE_SHIFT);
+
+	if (is_error_page(vmcs_page)) {
+		printk(KERN_ERR "%s: failed to get guest page at 0x%llx\n",
+		       __func__, vmcs_addr);
+		kvm_release_page_clean(vmcs_page);
+		return NULL;
+	}
+	return vmcs_page;
+}
+
+static int nested_map_current(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct page *vmcs_page =
+		nested_get_page(vcpu, vmx->nested.current_vmptr);
+
+	if (vmcs_page == NULL) {
+		printk(KERN_INFO "%s: failure in nested_get_page\n", __func__);
+		return 0;
+	}
+
+	if (vmx->nested.current_l2_page) {
+		printk(KERN_INFO "Shadow vmcs already mapped\n");
+		BUG();
+		return 0;
+	}
+
+	vmx->nested.current_l2_page = kmap_atomic(vmcs_page, KM_USER0);
+	return 1;
+}
+
+static void nested_unmap_current(struct kvm_vcpu *vcpu)
+{
+	struct page *page;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx->nested.current_l2_page) {
+		printk(KERN_INFO "Shadow vmcs already unmapped\n");
+		BUG();
+		return;
+	}
+
+	page = kmap_atomic_to_page(vmx->nested.current_l2_page);
+
+	kunmap_atomic(vmx->nested.current_l2_page, KM_USER0);
+
+	kvm_release_page_dirty(page);
+
+	vmx->nested.current_l2_page = NULL;
+}
+
 static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
@@ -4186,6 +4269,9 @@  static struct kvm_vcpu *vmx_create_vcpu(
 			goto free_vmcs;
 	}
 
+	vmx->nested.current_vmptr = -1ull;
+	vmx->nested.current_l2_page = NULL;
+
 	return &vmx->vcpu;
 
 free_vmcs:
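
The two helpers above are meant to bracket every access to the guest's
vmcs12: nested_map_current() pins and maps the page that current_vmptr
points at, and nested_unmap_current() must follow before the next map,
since kmap_atomic() mappings cannot be held for long. As a rough sketch of
how a later patch in this series might use them when emulating VMPTRLD
(the function name, return values and exact checks below are illustrative
assumptions, not part of this patch):

	/*
	 * Hypothetical sketch only: shows how nested_map_current() and
	 * nested_unmap_current() pair up around an access to the guest's
	 * vmcs12. Assumes the definitions introduced by this patch.
	 */
	static int example_vmptrld(struct kvm_vcpu *vcpu, gpa_t vmptr)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		u32 revision;

		/* nested_map_current() maps whatever current_vmptr points at */
		vmx->nested.current_vmptr = vmptr;
		if (!nested_map_current(vcpu))
			return -EINVAL;

		/* Per the Intel spec, a VMCS region starts with its revision id */
		revision = vmx->nested.current_l2_page->revision_id;

		/* Release the atomic mapping before doing anything else */
		nested_unmap_current(vcpu);

		/* Reject a vmcs12 written by an incompatible implementation */
		if (revision != VMCS12_REVISION)
			return -EINVAL;

		return 0;
	}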
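The initialization added to vmx_create_vcpu() uses -1ull as a sentinel
meaning "L1 has not yet loaded a VMCS for L2". A trivial helper along these
lines (hypothetical, not in this patch) would make that convention explicit
at call sites:

	/* Hypothetical: -1ull in current_vmptr marks "no current vmcs12" */
	static inline bool nested_has_current_vmcs12(struct vcpu_vmx *vmx)
	{
		return vmx->nested.current_vmptr != -1ull;
	}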
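Finally, because struct vmcs12 lives in guest memory and must survive live
migration byte-for-byte, its size and layout are effectively ABI; that is
why the structure is packed and stamped with VMCS12_REVISION. The Intel
spec allots at most 4KB for a VMCS region, so a compile-time guard like the
following (an assumption, not part of this patch) would catch the structure
ever outgrowing one page as fields are added in later patches:

	/* Hypothetical guard: the emulated VMCS must fit in its 4KB region */
	static inline void vmcs12_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct vmcs12) > PAGE_SIZE);
	}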