From patchwork Wed Sep 2 15:38:33 2009
X-Patchwork-Submitter: oritw@il.ibm.com
X-Patchwork-Id: 45231
From: oritw@il.ibm.com
To: kvm@vger.kernel.org
Cc: oritw@il.ibm.com, benami@il.ibm.com, muli@il.ibm.com, abelg@il.ibm.com,
	aliguori@us.ibm.com, mmday@us.ibm.com
Subject: [PATCH 3/6] Nested VMX patch 3 implements vmptrld and vmptrst
Date: Wed, 2 Sep 2009 18:38:33 +0300
Message-Id: <1251905916-2834-4-git-send-email-oritw@il.ibm.com>
X-Mailer: git-send-email 1.6.0.4
In-Reply-To: <1251905916-2834-3-git-send-email-oritw@il.ibm.com>
References: <1251905916-2834-1-git-send-email-oritw@il.ibm.com>
	<1251905916-2834-2-git-send-email-oritw@il.ibm.com>
	<1251905916-2834-3-git-send-email-oritw@il.ibm.com>

From: Orit Wasserman <oritw@il.ibm.com>

Implement the VMPTRLD and VMPTRST instructions for a nested (level 1)
guest. handle_vmptrld() validates the guest's VMCS pointer, loads the
guest VMCS on the processor, copies it into a software shadow_vmcs, and
then reloads KVM's own VMCS; handle_vmptrst() stores the currently
loaded level-1 VMCS pointer back to the guest.

---
 arch/x86/kvm/vmx.c |  533 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 523 insertions(+), 10 deletions(-)
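For context, the guest-side pattern these exit handlers emulate looks roughly
as follows. This is an illustration only, not part of the patch: plain
mnemonics stand in for kvm's __ex()/ASM_VMX_* byte-sequence wrappers, and
l1_vmptrld()/l1_vmptrst() are made-up names. Note that the emulation below
communicates through guest RAX rather than decoding the real memory operand:
read_guest_vmcs_gpa() takes the operand address from RAX, and handle_vmptrst()
stores the current pointer back into RAX.

/*
 * Illustration only: what a level-1 hypervisor executes to load and
 * store its VMCS pointer. "vmcs_pa" is the 4KB-aligned physical address
 * of a VMCS region; VMfail is reported through CF/ZF, which is why the
 * handlers below end with clear_rflags_cf_zf() to signal success.
 */
static inline int l1_vmptrld(u64 vmcs_pa)
{
	u8 error;

	/* VMPTRLD takes the VMCS physical address as an m64 operand. */
	asm volatile ("vmptrld %1; setna %0"
		      : "=q"(error) : "m"(vmcs_pa) : "cc", "memory");
	return error;		/* non-zero on VMfail (CF or ZF set) */
}

static inline u64 l1_vmptrst(void)
{
	u64 vmcs_pa;

	/* VMPTRST stores the current-VMCS pointer to an m64 operand. */
	asm volatile ("vmptrst %0" : "=m"(vmcs_pa) : : "cc", "memory");
	return vmcs_pa;
}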
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2b1fc3b..5ab07a0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,151 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
+struct __attribute__ ((__packed__)) shadow_vmcs {
+	uint16_t virtual_processor_id;
+	uint16_t guest_es_selector;
+	uint16_t guest_cs_selector;
+	uint16_t guest_ss_selector;
+	uint16_t guest_ds_selector;
+	uint16_t guest_fs_selector;
+	uint16_t guest_gs_selector;
+	uint16_t guest_ldtr_selector;
+	uint16_t guest_tr_selector;
+	uint16_t host_es_selector;
+	uint16_t host_cs_selector;
+	uint16_t host_ss_selector;
+	uint16_t host_ds_selector;
+	uint16_t host_fs_selector;
+	uint16_t host_gs_selector;
+	uint16_t host_tr_selector;
+	uint64_t io_bitmap_a;
+	uint64_t io_bitmap_b;
+	uint64_t msr_bitmap;
+	uint64_t vm_exit_msr_store_addr;
+	uint64_t vm_exit_msr_load_addr;
+	uint64_t vm_entry_msr_load_addr;
+	uint64_t tsc_offset;
+	uint64_t virtual_apic_page_addr;
+	uint64_t apic_access_addr;
+	uint64_t ept_pointer;
+	uint64_t guest_physical_address;
+	uint64_t vmcs_link_pointer;
+	uint64_t guest_ia32_debugctl;
+	uint64_t guest_ia32_pat;
+	uint64_t guest_pdptr0;
+	uint64_t guest_pdptr1;
+	uint64_t guest_pdptr2;
+	uint64_t guest_pdptr3;
+	uint64_t host_ia32_pat;
+	uint32_t pin_based_vm_exec_control;
+	uint32_t cpu_based_vm_exec_control;
+	uint32_t exception_bitmap;
+	uint32_t page_fault_error_code_mask;
+	uint32_t page_fault_error_code_match;
+	uint32_t cr3_target_count;
+	uint32_t vm_exit_controls;
+	uint32_t vm_exit_msr_store_count;
+	uint32_t vm_exit_msr_load_count;
+	uint32_t vm_entry_controls;
+	uint32_t vm_entry_msr_load_count;
+	uint32_t vm_entry_intr_info_field;
+	uint32_t vm_entry_exception_error_code;
+	uint32_t vm_entry_instruction_len;
+	uint32_t tpr_threshold;
+	uint32_t secondary_vm_exec_control;
+	uint32_t vm_instruction_error;
+	uint32_t vm_exit_reason;
+	uint32_t vm_exit_intr_info;
+	uint32_t vm_exit_intr_error_code;
+	uint32_t idt_vectoring_info_field;
+	uint32_t idt_vectoring_error_code;
+	uint32_t vm_exit_instruction_len;
+	uint32_t vmx_instruction_info;
+	uint32_t guest_es_limit;
+	uint32_t guest_cs_limit;
+	uint32_t guest_ss_limit;
+	uint32_t guest_ds_limit;
+	uint32_t guest_fs_limit;
+	uint32_t guest_gs_limit;
+	uint32_t guest_ldtr_limit;
+	uint32_t guest_tr_limit;
+	uint32_t guest_gdtr_limit;
+	uint32_t guest_idtr_limit;
+	uint32_t guest_es_ar_bytes;
+	uint32_t guest_cs_ar_bytes;
+	uint32_t guest_ss_ar_bytes;
+	uint32_t guest_ds_ar_bytes;
+	uint32_t guest_fs_ar_bytes;
+	uint32_t guest_gs_ar_bytes;
+	uint32_t guest_ldtr_ar_bytes;
+	uint32_t guest_tr_ar_bytes;
+	uint32_t guest_interruptibility_info;
+	uint32_t guest_activity_state;
+	uint32_t guest_sysenter_cs;
+	uint32_t host_ia32_sysenter_cs;
+	unsigned long cr0_guest_host_mask;
+	unsigned long cr4_guest_host_mask;
+	unsigned long cr0_read_shadow;
+	unsigned long cr4_read_shadow;
+	unsigned long cr3_target_value0;
+	unsigned long cr3_target_value1;
+	unsigned long cr3_target_value2;
+	unsigned long cr3_target_value3;
+	unsigned long exit_qualification;
+	unsigned long guest_linear_address;
+	unsigned long guest_cr0;
+	unsigned long guest_cr3;
+	unsigned long guest_cr4;
+	unsigned long guest_es_base;
+	unsigned long guest_cs_base;
+	unsigned long guest_ss_base;
+	unsigned long guest_ds_base;
+	unsigned long guest_fs_base;
+	unsigned long guest_gs_base;
+	unsigned long guest_ldtr_base;
+	unsigned long guest_tr_base;
+	unsigned long guest_gdtr_base;
+	unsigned long guest_idtr_base;
+	unsigned long guest_dr7;
+	unsigned long guest_rsp;
+	unsigned long guest_rip;
+	unsigned long guest_rflags;
+	unsigned long guest_pending_dbg_exceptions;
+	unsigned long guest_sysenter_esp;
+	unsigned long guest_sysenter_eip;
+	unsigned long host_cr0;
+	unsigned long host_cr3;
+	unsigned long host_cr4;
+	unsigned long host_fs_base;
+	unsigned long host_gs_base;
+	unsigned long host_tr_base;
+	unsigned long host_gdtr_base;
+	unsigned long host_idtr_base;
+	unsigned long host_ia32_sysenter_esp;
+	unsigned long host_ia32_sysenter_eip;
+	unsigned long host_rsp;
+	unsigned long host_rip;
+};
+
+struct __attribute__ ((__packed__)) level_state {
+	struct shadow_vmcs *shadow_vmcs;
+
+	u16 vpid;
+	u64 shadow_efer;
+	unsigned long cr2;
+	unsigned long cr3;
+	unsigned long cr4;
+	unsigned long cr8;
+
+	u64 io_bitmap_a;
+	u64 io_bitmap_b;
+	u64 msr_bitmap;
+
+	struct vmcs *vmcs;
+	int cpu;
+	int launched;
+};
+
 struct vmcs {
 	u32 revision_id;
 	u32 abort;
@@ -72,6 +217,17 @@ struct nested_vmx {
 	bool vmon;
 	/* Has the level1 guest done vmclear? */
 	bool vmclear;
+	/* What is the location of the vmcs l1 keeps for l2? (in level1 gpa) */
+	u64 l1_cur_vmcs;
+	/*
+	 * Level 2 state : includes vmcs,registers and
+	 * a copy of vmcs12 for vmread/vmwrite
+	 */
+	struct level_state *l2_state;
+
+	/* Level 1 state for switching to level 2 and back */
+	struct level_state *l1_state;
+
 };
 
 struct vcpu_vmx {
@@ -131,6 +287,25 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
+static struct page *nested_get_page(struct kvm_vcpu *vcpu,
+				    u64 vmcs_addr)
+{
+	struct page *vmcs_page = NULL;
+
+	down_read(&current->mm->mmap_sem);
+	vmcs_page = gfn_to_page(vcpu->kvm, vmcs_addr >> PAGE_SHIFT);
+	up_read(&current->mm->mmap_sem);
+
+	if (is_error_page(vmcs_page)) {
+		printk(KERN_ERR "%s error allocating page\n", __func__);
+		kvm_release_page_clean(vmcs_page);
+		return NULL;
+	}
+
+	return vmcs_page;
+}
+
 static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 
@@ -188,6 +363,10 @@ static struct kvm_vmx_segment_field {
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
+static int create_l1_state(struct kvm_vcpu *vcpu);
+static int create_l2_state(struct kvm_vcpu *vcpu);
+static int shadow_vmcs_load(struct kvm_vcpu *vcpu);
+
 /*
  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
@@ -704,6 +883,24 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 	preempt_enable();
 }
 
+static int vmptrld(struct kvm_vcpu *vcpu,
+		   u64 phys_addr)
+{
+	u8 error;
+
+	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
+		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "cc");
+	if (error) {
+		printk(KERN_ERR "kvm: %s vmptrld %llx failed\n",
+		       __func__, phys_addr);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -725,15 +922,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
-		u8 error;
-
 		per_cpu(current_vmcs, cpu) = vmx->vmcs;
-		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
-			      : "cc");
-		if (error)
-			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-			       vmx->vmcs, phys_addr);
+		vmptrld(vcpu, phys_addr);
 	}
 
 	if (vcpu->cpu != cpu) {
@@ -3252,6 +3442,115 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+int read_guest_vmcs_gpa(struct kvm_vcpu *vcpu, u64 *gentry)
+{
+	gpa_t gpa;
+	struct page *page;
+	int r = 0;
+
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+
+	/* checking guest gpa */
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (is_error_page(page)) {
+		printk(KERN_ERR "%s Invalid guest vmcs addr %llx\n",
+		       __func__, gpa);
+		r = 1;
+		goto out;
+	}
+
+	r = kvm_read_guest(vcpu->kvm, gpa, gentry, sizeof(u64));
+	if (r) {
+		printk(KERN_ERR "%s cannot read guest vmcs addr %llx : %d\n",
+		       __func__, gpa, r);
+		goto out;
+	}
+
+	if (!IS_ALIGNED(*gentry, PAGE_SIZE)) {
+		printk(KERN_DEBUG "%s addr %llx not aligned\n",
+		       __func__, *gentry);
+		r = 1;
+	}
+
+out:
+	kvm_release_page_clean(page);
+	return r;
+}
+
+static int handle_vmptrld(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct page *vmcs_page;
+	u64 guest_vmcs_addr;
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (read_guest_vmcs_gpa(vcpu, &guest_vmcs_addr))
+		return 1;
+
+	if (create_l1_state(vcpu)) {
+		printk(KERN_ERR "%s create_l1_state failed\n", __func__);
+		return 1;
+	}
+
+	if (create_l2_state(vcpu)) {
+		printk(KERN_ERR "%s create_l2_state failed\n", __func__);
+		return 1;
+	}
+
+	vmx->nested.l2_state->vmcs = alloc_vmcs();
+	if (!vmx->nested.l2_state->vmcs) {
+		printk(KERN_ERR "%s error in creating level 2 vmcs\n", __func__);
+		return 1;
+	}
+
+	if (vmx->nested.l1_cur_vmcs != guest_vmcs_addr) {
+		vmcs_page = nested_get_page(vcpu, guest_vmcs_addr);
+		if (vmcs_page == NULL)
+			return 1;
+
+		/* load nested vmcs to processor */
+		if (vmptrld(vcpu, page_to_phys(vmcs_page))) {
+			printk(KERN_INFO "%s error in vmptrld\n",
+			       __func__);
+			kvm_release_page_clean(vmcs_page);
+			return 1;
+		}
+
+		/* save nested vmcs in the shadow vmcs */
+		if (shadow_vmcs_load(vcpu)) {
+			kvm_release_page_clean(vmcs_page);
+			return 1;
+		}
+
+		vmx->nested.l1_cur_vmcs = guest_vmcs_addr;
+
+		/* load to previous vmcs */
+		if (vmptrld(vcpu, __pa(to_vmx(vcpu)->vmcs))) {
+			kvm_release_page_clean(vmcs_page);
+			return 1;
+		}
+
+		kvm_release_page_clean(vmcs_page);
+	}
+	clear_rflags_cf_zf(vcpu);
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
+static int handle_vmptrst(struct kvm_vcpu *vcpu)
+{
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	vcpu->arch.regs[VCPU_REGS_RAX] = to_vmx(vcpu)->nested.l1_cur_vmcs;
+
+	clear_rflags_cf_zf(vcpu);
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
 static int handle_vmx_insn(struct kvm_vcpu *vcpu)
 {
 	kvm_queue_exception(vcpu, UD_VECTOR);
@@ -3576,8 +3875,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
 	[EXIT_REASON_VMCLEAR]                 = handle_vmclear,
 	[EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
-	[EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
-	[EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
+	[EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
+	[EXIT_REASON_VMPTRST]                 = handle_vmptrst,
 	[EXIT_REASON_VMREAD]                  = handle_vmx_insn,
 	[EXIT_REASON_VMRESUME]                = handle_vmx_insn,
 	[EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
@@ -4115,6 +4414,220 @@ static bool vmx_gb_page_enable(void)
 	return false;
 }
 
+void save_vmcs(struct shadow_vmcs *dst)
+{
+	dst->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
+	dst->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
+	dst->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
+	dst->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
+	dst->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
+	dst->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
+	dst->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
+	dst->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
+	dst->host_es_selector = vmcs_read16(HOST_ES_SELECTOR);
+	dst->host_cs_selector = vmcs_read16(HOST_CS_SELECTOR);
+	dst->host_ss_selector = vmcs_read16(HOST_SS_SELECTOR);
+	dst->host_ds_selector = vmcs_read16(HOST_DS_SELECTOR);
+	dst->host_fs_selector = vmcs_read16(HOST_FS_SELECTOR);
+	dst->host_gs_selector = vmcs_read16(HOST_GS_SELECTOR);
+	dst->host_tr_selector = vmcs_read16(HOST_TR_SELECTOR);
+	dst->io_bitmap_a = vmcs_read64(IO_BITMAP_A);
+	dst->io_bitmap_b = vmcs_read64(IO_BITMAP_B);
+	if (cpu_has_vmx_msr_bitmap())
+		dst->msr_bitmap = vmcs_read64(MSR_BITMAP);
+
+	dst->vm_exit_msr_store_addr = vmcs_read64(VM_EXIT_MSR_STORE_ADDR);
+	dst->vm_exit_msr_load_addr = vmcs_read64(VM_EXIT_MSR_LOAD_ADDR);
+	dst->vm_entry_msr_load_addr = vmcs_read64(VM_ENTRY_MSR_LOAD_ADDR);
+	dst->tsc_offset = vmcs_read64(TSC_OFFSET);
+	dst->virtual_apic_page_addr = vmcs_read64(VIRTUAL_APIC_PAGE_ADDR);
+	dst->apic_access_addr = vmcs_read64(APIC_ACCESS_ADDR);
+	if (enable_ept)
+		dst->ept_pointer = vmcs_read64(EPT_POINTER);
+
+	dst->guest_physical_address = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+	dst->vmcs_link_pointer = vmcs_read64(VMCS_LINK_POINTER);
+	dst->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+		dst->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+	if (enable_ept) {
+		dst->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
+		dst->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
+		dst->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
+		dst->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
+	}
+	dst->pin_based_vm_exec_control = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
+	dst->cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	dst->exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
+	dst->page_fault_error_code_mask =
+		vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK);
+	dst->page_fault_error_code_match =
+		vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH);
+	dst->cr3_target_count = vmcs_read32(CR3_TARGET_COUNT);
+	dst->vm_exit_controls = vmcs_read32(VM_EXIT_CONTROLS);
+	dst->vm_exit_msr_store_count = vmcs_read32(VM_EXIT_MSR_STORE_COUNT);
+	dst->vm_exit_msr_load_count = vmcs_read32(VM_EXIT_MSR_LOAD_COUNT);
+	dst->vm_entry_controls = vmcs_read32(VM_ENTRY_CONTROLS);
+	dst->vm_entry_msr_load_count = vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT);
+	dst->vm_entry_intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+	dst->vm_entry_exception_error_code =
+		vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE);
+	dst->vm_entry_instruction_len = vmcs_read32(VM_ENTRY_INSTRUCTION_LEN);
+	dst->tpr_threshold = vmcs_read32(TPR_THRESHOLD);
+	dst->secondary_vm_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+	if (enable_vpid && dst->secondary_vm_exec_control &
+	    SECONDARY_EXEC_ENABLE_VPID)
+		dst->virtual_processor_id = vmcs_read16(VIRTUAL_PROCESSOR_ID);
+	dst->vm_instruction_error = vmcs_read32(VM_INSTRUCTION_ERROR);
+	dst->vm_exit_reason = vmcs_read32(VM_EXIT_REASON);
+	dst->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	dst->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+	dst->idt_vectoring_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+	dst->idt_vectoring_error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE);
+	dst->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+	dst->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	dst->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
+	dst->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
+	dst->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
+	dst->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
+	dst->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
+	dst->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
+	dst->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
+	dst->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
+	dst->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
+	dst->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
+	dst->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
+	dst->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
+	dst->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
+	dst->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
+	dst->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
+	dst->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
+	dst->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
+	dst->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
+	dst->guest_interruptibility_info =
+		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+	dst->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
+	dst->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
+	dst->host_ia32_sysenter_cs = vmcs_read32(HOST_IA32_SYSENTER_CS);
+	dst->cr0_guest_host_mask = vmcs_readl(CR0_GUEST_HOST_MASK);
+	dst->cr4_guest_host_mask = vmcs_readl(CR4_GUEST_HOST_MASK);
+	dst->cr0_read_shadow = vmcs_readl(CR0_READ_SHADOW);
+	dst->cr4_read_shadow = vmcs_readl(CR4_READ_SHADOW);
+	dst->cr3_target_value0 = vmcs_readl(CR3_TARGET_VALUE0);
+	dst->cr3_target_value1 = vmcs_readl(CR3_TARGET_VALUE1);
+	dst->cr3_target_value2 = vmcs_readl(CR3_TARGET_VALUE2);
+	dst->cr3_target_value3 = vmcs_readl(CR3_TARGET_VALUE3);
+	dst->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	dst->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+	dst->guest_cr0 = vmcs_readl(GUEST_CR0);
+	dst->guest_cr3 = vmcs_readl(GUEST_CR3);
+	dst->guest_cr4 = vmcs_readl(GUEST_CR4);
+	dst->guest_es_base = vmcs_readl(GUEST_ES_BASE);
+	dst->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
+	dst->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
+	dst->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
+	dst->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
+	dst->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
+	dst->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
+	dst->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
+	dst->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
+	dst->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
+	dst->guest_dr7 = vmcs_readl(GUEST_DR7);
+	dst->guest_rsp = vmcs_readl(GUEST_RSP);
+	dst->guest_rip = vmcs_readl(GUEST_RIP);
+	dst->guest_rflags = vmcs_readl(GUEST_RFLAGS);
+	dst->guest_pending_dbg_exceptions =
+		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+	dst->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
+	dst->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+	dst->host_cr0 = vmcs_readl(HOST_CR0);
+	dst->host_cr3 = vmcs_readl(HOST_CR3);
+	dst->host_cr4 = vmcs_readl(HOST_CR4);
+	dst->host_fs_base = vmcs_readl(HOST_FS_BASE);
+	dst->host_gs_base = vmcs_readl(HOST_GS_BASE);
+	dst->host_tr_base = vmcs_readl(HOST_TR_BASE);
+	dst->host_gdtr_base = vmcs_readl(HOST_GDTR_BASE);
+	dst->host_idtr_base = vmcs_readl(HOST_IDTR_BASE);
+	dst->host_ia32_sysenter_esp = vmcs_readl(HOST_IA32_SYSENTER_ESP);
+	dst->host_ia32_sysenter_eip = vmcs_readl(HOST_IA32_SYSENTER_EIP);
+	dst->host_rsp = vmcs_readl(HOST_RSP);
+	dst->host_rip = vmcs_readl(HOST_RIP);
+	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT)
+		dst->host_ia32_pat = vmcs_read64(HOST_IA32_PAT);
+}
+
+static int shadow_vmcs_load(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx->nested.l2_state->shadow_vmcs) {
+		vmx->nested.l2_state->shadow_vmcs =
+			kzalloc(PAGE_SIZE, GFP_KERNEL);
+		if (!vmx->nested.l2_state->shadow_vmcs) {
+			printk(KERN_INFO "%s error creating nested vmcs\n",
+			       __func__);
+			return -ENOMEM;
+		}
+	}
+
+	save_vmcs(vmx->nested.l2_state->shadow_vmcs);
+
+	return 0;
+}
+
+struct level_state *create_state(void)
+{
+	struct level_state *state = NULL;
+
+	state = kzalloc(sizeof(struct level_state), GFP_KERNEL);
+	if (!state) {
+		printk(KERN_INFO "Error creating level state\n");
+		return NULL;
+	}
+	state->shadow_vmcs = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!state->shadow_vmcs) {
+		printk(KERN_INFO "%s error creating shadow vmcs\n",
+		       __func__);
+		kfree(state);
+		return NULL;
+	}
+	return state;
+}
+
+int create_l1_state(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx->nested.l1_state) {
+		vmx->nested.l1_state = create_state();
+		if (!vmx->nested.l1_state)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int create_l2_state(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx->nested.l2_state) {
+		vmx->nested.l2_state = create_state();
+		if (!vmx->nested.l2_state)
+			return -ENOMEM;
+	}
+
+	if (cpu_has_vmx_msr_bitmap())
+		vmx->nested.l2_state->msr_bitmap = vmcs_read64(MSR_BITMAP);
+	else
+		vmx->nested.l2_state->msr_bitmap = 0;
+
+	vmx->nested.l2_state->io_bitmap_a = vmcs_read64(IO_BITMAP_A);
+	vmx->nested.l2_state->io_bitmap_b = vmcs_read64(IO_BITMAP_B);
+
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
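The shadow copy filled in by shadow_vmcs_load() above is, as the nested_vmx
comment puts it, "a copy of vmcs12 for vmread/vmwrite": once the guest's VMCS
fields live in the software struct shadow_vmcs, later patches in the series
can emulate VMREAD/VMWRITE without reloading the guest VMCS on the processor.
A minimal sketch of that direction, with a hypothetical helper name:

/*
 * Hypothetical sketch, not part of this patch: serving a guest VMREAD
 * of GUEST_CR0 straight from the shadow copy saved by save_vmcs().
 */
static unsigned long nested_read_guest_cr0(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.l2_state->shadow_vmcs->guest_cr0;
}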