@@ -281,6 +281,9 @@ struct __packed vmcs12 {
struct vmcs_fields fields;
bool launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
+
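+ /* cpu on which this L2's hardware VMCS was last loaded, and whether
+ * it was already launched there */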
+ int cpu;
+ int launched;
};
/*
@@ -315,6 +318,21 @@ struct nested_vmx {
/* list of real (hardware) VMCS, one for each L2 guest of L1 */
struct list_head vmcs02_list; /* a vmcs_list */
int vmcs02_num;
+
+ /* Level 1 state for switching to level 2 and back */
+ struct {
+ u64 efer;
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+ int cpu;
+ int launched;
+ } l1_state;
+ /* Saving the VMCS that we used for running L1 */
+ struct vmcs *vmcs01;
+ struct vmcs_fields *vmcs01_fields;
+ /* Saving some vcpu->arch.* data we had for L1, while running L2 */
+ unsigned long l1_arch_cr3;
};
struct vcpu_vmx {
@@ -1344,6 +1362,16 @@ static void vmx_vcpu_load(struct kvm_vcp
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
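+ /*
+ * Keep the saved vmcs01 host fields in sync with the per-cpu values
+ * written above; if we are currently running L2, also load them into
+ * the active vmcs02 so that exits from L2 find the host state of the
+ * cpu we just migrated to.
+ */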
+ if (vmx->nested.vmcs01_fields != NULL) {
+ struct vmcs_fields *vmcs01 = vmx->nested.vmcs01_fields;
+ vmcs01->host_tr_base = vmcs_readl(HOST_TR_BASE);
+ vmcs01->host_gdtr_base = vmcs_readl(HOST_GDTR_BASE);
+ vmcs01->host_ia32_sysenter_esp =
+ vmcs_readl(HOST_IA32_SYSENTER_ESP);
+ if (is_guest_mode(vcpu))
+ load_vmcs_host_state(vmcs01);
+ }
}
}
@@ -2173,6 +2201,9 @@ static void free_l1_state(struct kvm_vcp
kfree(list_item);
}
vmx->nested.vmcs02_num = 0;
+
+ kfree(vmx->nested.vmcs01_fields);
+ vmx->nested.vmcs01_fields = NULL;
}
static void free_kvm_area(void)
@@ -4326,6 +4357,10 @@ static int handle_vmon(struct kvm_vcpu *
INIT_LIST_HEAD(&(vmx->nested.vmcs02_list));
vmx->nested.vmcs02_num = 0;
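+ /* A software copy of L1's VMCS fields, saved when we switch to L2 */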
+ vmx->nested.vmcs01_fields = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vmx->nested.vmcs01_fields)
+ return -ENOMEM;
+
vmx->nested.vmxon = true;
skip_emulated_instruction(vcpu);
@@ -4524,6 +4559,50 @@ static int handle_vmclear(struct kvm_vcp
return 1;
}
+static int nested_vmx_run(struct kvm_vcpu *vcpu);
+
+static int handle_launch_or_resume(struct kvm_vcpu *vcpu, bool launch)
+{
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ /* Yet another strange prerequisite listed in the VMX spec: entry
+ * fails while the guest is in the "blocking by MOV SS" state */
+ if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ GUEST_INTR_STATE_MOV_SS) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (to_vmx(vcpu)->nested.current_vmcs12->launch_state == launch) {
+ /* Must use VMLAUNCH for the first time, VMRESUME later */
+ nested_vmx_failValid(vcpu,
+ launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS :
+ VMXERR_VMRESUME_NONLAUNCHED_VMCS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ skip_emulated_instruction(vcpu);
+
+ return nested_vmx_run(vcpu);
+}
+
+/* Emulate the VMLAUNCH instruction */
+static int handle_vmlaunch(struct kvm_vcpu *vcpu)
+{
+ return handle_launch_or_resume(vcpu, true);
+}
+
+/* Emulate the VMRESUME instruction */
+static int handle_vmresume(struct kvm_vcpu *vcpu)
+{
+ return handle_launch_or_resume(vcpu, false);
+}
+
enum vmcs_field_type {
VMCS_FIELD_TYPE_U16 = 0,
VMCS_FIELD_TYPE_U64 = 1,
@@ -4797,11 +4876,11 @@ static int (*kvm_vmx_exit_handlers[])(st
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear,
- [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
+ [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
[EXIT_REASON_VMPTRLD] = handle_vmptrld,
[EXIT_REASON_VMPTRST] = handle_vmptrst,
[EXIT_REASON_VMREAD] = handle_vmread,
- [EXIT_REASON_VMRESUME] = handle_vmx_insn,
+ [EXIT_REASON_VMRESUME] = handle_vmresume,
[EXIT_REASON_VMWRITE] = handle_vmwrite,
[EXIT_REASON_VMOFF] = handle_vmoff,
[EXIT_REASON_VMON] = handle_vmon,
@@ -4870,7 +4949,8 @@ static int vmx_handle_exit(struct kvm_vc
"(0x%x) and exit reason is 0x%x\n",
__func__, vectoring_info, exit_reason);
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
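+ /* The soft-vnmi workaround below is only relevant while running L1 */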
+ if (!is_guest_mode(vcpu) &&
+ unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->soft_vnmi_blocked = 0;
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -5779,6 +5859,155 @@ int prepare_vmcs02(struct kvm_vcpu *vcpu
return 0;
}
+
+
+/*
+ * Return the cr0 value that a guest would read. This is a combination of
+ * the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
+ * the hypervisor (cr0_read_shadow).
+ */
+static inline unsigned long guest_readable_cr0(struct vmcs_fields *fields)
+{
+ return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
+ (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
+}
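+
+/* Similarly, the cr4 value that a guest would read */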
+static inline unsigned long guest_readable_cr4(struct vmcs_fields *fields)
+{
+ return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
+ (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+}
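+
+/*
+ * Write a new guest cr3 into the VMCS, and load the PDPTEs into their
+ * VMCS fields as well, as needed for a PAE guest running under EPT.
+ */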
+static inline void set_cr3_and_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+ vcpu->arch.cr3 = cr3;
+ vmcs_writel(GUEST_CR3, cr3);
+ load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3);
+ vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
+ vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
+ vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
+ vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+}
+
+static int nested_vmx_run(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
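+ /*
+ * Flush the cached registers into vmcs01, and save a software copy of
+ * its fields so that L1's state can be restored on the switch back.
+ */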
+ enter_guest_mode(vcpu);
+ sync_cached_regs_to_vmcs(vcpu);
+ save_vmcs(vmx->nested.vmcs01_fields);
+
+ vmx->nested.l1_state.efer = vcpu->arch.efer;
+ /*
+ * arch.cr3, the guest's original page table (not the shadow), needs
+ * to be saved as well.
+ */
+ vmx->nested.l1_arch_cr3 = vcpu->arch.cr3;
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx->nested.l1_state.msr_bitmap = vmcs_read64(MSR_BITMAP);
+ else
+ vmx->nested.l1_state.msr_bitmap = 0;
+
+ vmx->nested.l1_state.io_bitmap_a = vmcs_read64(IO_BITMAP_A);
+ vmx->nested.l1_state.io_bitmap_b = vmcs_read64(IO_BITMAP_B);
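+
+ /* Remember L1's hardware VMCS and its cpu/launched state */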
+ vmx->nested.vmcs01 = vmx->vmcs;
+ vmx->nested.l1_state.cpu = vcpu->cpu;
+ vmx->nested.l1_state.launched = vmx->launched;
+
+ vmx->vmcs = nested_get_current_vmcs(vcpu);
+ if (!vmx->vmcs) {
+ printk(KERN_ERR "Missing VMCS\n");
+ nested_vmx_failValid(vcpu, VMXERR_VMRESUME_CORRUPTED_VMCS);
+ return 1;
+ }
+
+ vcpu->cpu = vmx->nested.current_vmcs12->cpu;
+ vmx->launched = vmx->nested.current_vmcs12->launched;
+
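+ /*
+ * The hardware requires a VMCS to be cleared before its first
+ * VMLAUNCH, so clear vmcs02 unless it is already launched.
+ */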
+ if (!vmx->nested.current_vmcs12->launch_state || !vmx->launched) {
+ vmcs_clear(vmx->vmcs);
+ vmx->launched = 0;
+ vmx->nested.current_vmcs12->launch_state = 1;
+ }
+
+ vmx_vcpu_load(vcpu, get_cpu());
+ put_cpu();
+
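+ /*
+ * Merge the fields L1 specified for running L2 (vmcs12) with the
+ * fields the host needs (the saved vmcs01 fields) into the active
+ * vmcs02.
+ */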
+ prepare_vmcs02(vcpu,
+ get_vmcs12_fields(vcpu), vmx->nested.vmcs01_fields);
+
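+ /* Make vcpu->arch.efer match the "IA-32e mode guest" entry control */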
+ if (get_vmcs12_fields(vcpu)->vm_entry_controls &
+ VM_ENTRY_IA32E_MODE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ /* clear only the long-mode bits, keeping NX, SCE etc. intact */
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+
+ vmx->rmode.vm86_active =
+ !(get_vmcs12_fields(vcpu)->cr0_read_shadow & X86_CR0_PE);
+
+ /*
+ * vmx_set_cr0() sets the cr0 that L2 will read, to be the one that L1
+ * dictated, and takes appropriate actions for special cr0 bits (like
+ * real mode, etc.).
+ */
+ vmx_set_cr0(vcpu, guest_readable_cr0(get_vmcs12_fields(vcpu)));
+
+ /*
+ * However, vmx_set_cr0 incorrectly enforces KVM's relationship between
+ * GUEST_CR0 and CR0_READ_SHADOW, e.g., that the former is the same as
+ * the latter with TS added if !fpu_active. We need to take the actual
+ * GUEST_CR0 that L1 wanted, just with TS added if !fpu_active, as KVM
+ * wants (for the "lazy fpu" feature, to avoid the costly restoration
+ * of fpu registers until the FPU is really used).
+ */
+ vmcs_writel(GUEST_CR0, get_vmcs12_fields(vcpu)->guest_cr0 |
+ (vcpu->fpu_active ? 0 : X86_CR0_TS));
+
+ /*
+ * We have to set the X86_CR0_PG bit of the cached cr0, because
+ * kvm_mmu_reset_context enables paging only if X86_CR0_PG is set in
+ * CR0. (We need paging so that KVM treats this guest as a paging
+ * guest, letting us easily forward page faults to L1.)
+ */
+ vcpu->arch.cr0 |= X86_CR0_PG;
+
+ if (enable_ept) {
+ /* shadow page tables on EPT */
+ vcpu->arch.cr4 = guest_readable_cr4(get_vmcs12_fields(vcpu));
+ vmcs_writel(CR4_READ_SHADOW, vcpu->arch.cr4);
+ vmcs_writel(GUEST_CR4, get_vmcs12_fields(vcpu)->guest_cr4);
+ set_cr3_and_pdptrs(vcpu, get_vmcs12_fields(vcpu)->guest_cr3);
+ } else {
+ /* shadow page tables on shadow page tables */
+ vmx_set_cr4(vcpu, get_vmcs12_fields(vcpu)->guest_cr4);
+ vmcs_writel(CR4_READ_SHADOW,
+ get_vmcs12_fields(vcpu)->cr4_read_shadow);
+ kvm_set_cr3(vcpu, get_vmcs12_fields(vcpu)->guest_cr3);
+ kvm_mmu_reset_context(vcpu);
+
+ if (unlikely(kvm_mmu_load(vcpu))) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMRESUME_CORRUPTED_VMCS /* ? */);
+ /* switch back to L1 */
+ leave_guest_mode(vcpu);
+ vmx->vmcs = vmx->nested.vmcs01;
+ vcpu->cpu = vmx->nested.l1_state.cpu;
+ vmx->launched = vmx->nested.l1_state.launched;
+
+ vmx_vcpu_load(vcpu, get_cpu());
+ put_cpu();
+
+ return 1;
+ }
+ }
+
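+ /* Load L2's rsp and rip from vmcs12 into the register cache */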
+ kvm_register_write(vcpu, VCPU_REGS_RSP,
+ get_vmcs12_fields(vcpu)->guest_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP,
+ get_vmcs12_fields(vcpu)->guest_rip);
+
+ return 1;
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,