@@ -335,6 +335,8 @@ struct nested_vmx {
/* Saving the VMCS that we used for running L1 */
struct vmcs *vmcs01;
struct vmcs_fields *vmcs01_fields;
+ /* L2 must run next, and mustn't decide to exit to L1. */
+ bool nested_run_pending;
};
struct vcpu_vmx {
@@ -845,6 +847,20 @@ static inline bool nested_vm_need_virtua
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}
+static inline bool nested_cpu_has_vmx_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	return get_vmcs12_fields(vcpu)->cpu_based_vm_exec_control &
+		CPU_BASED_USE_MSR_BITMAPS; /* did L1 enable an MSR bitmap for L2? */
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); /* valid hardware exception only - not NMI/IRQ/soft event */
+}
+
+static int nested_vmx_vmexit(struct kvm_vcpu *vcpu, bool is_interrupt); /* forward declaration */
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -4894,6 +4910,183 @@ static const int kvm_vmx_max_exit_handle
ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check L1's MSR bitmap whether
+ * it expressed interest in the current event (read or write a specific MSR).
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12, u32 exit_reason)
+{
+	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+	struct page *msr_bitmap_page;
+	void *va;
+	bool ret;
+
+	if (!cpu_has_vmx_msr_bitmap() || !nested_cpu_has_vmx_msr_bitmap(vcpu))
+		return 1;
+
+	msr_bitmap_page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+	if (!msr_bitmap_page) {
+		printk(KERN_INFO "%s error in nested_get_page\n", __func__);
+		return 0;
+	}
+
+	va = kmap_atomic(msr_bitmap_page, KM_USER1);
+	if (exit_reason == EXIT_REASON_MSR_WRITE)
+		va += 0x800;	/* write bitmaps live in the second 2K half */
+	if (msr_index >= 0xc0000000) {
+		msr_index -= 0xc0000000;
+		va += 0x400;	/* high MSRs use the second 1K quarter */
+	}
+	/* Out-of-range MSR: not covered by the bitmap, so L0 handles it
+	 * (ret = 0). Checked here so kunmap_atomic() runs on every path. */
+	ret = (msr_index <= 0x1fff) && test_bit(msr_index, va);
+	kunmap_atomic(va, KM_USER1);
+	return ret;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+	struct vmcs_fields *vmcs12)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int cr = exit_qualification & 15;
+	int reg = (exit_qualification >> 8) & 15;
+	unsigned long val = kvm_register_read(vcpu, reg);
+
+	switch ((exit_qualification >> 4) & 3) {
+	case 0: /* mov to cr */
+		switch (cr) {
+		case 0:
+			if (vmcs12->cr0_guest_host_mask &
+			    (val ^ vmcs12->cr0_read_shadow))
+				return 1;
+			break;
+		case 3:
+			if (vmcs12->cpu_based_vm_exec_control &
+			    CPU_BASED_CR3_LOAD_EXITING)
+				return 1;
+			break;
+		case 4:
+			if (vmcs12->cr4_guest_host_mask &
+			    (vmcs12->cr4_read_shadow ^ val))
+				return 1;
+			break;
+		case 8:
+			if (vmcs12->cpu_based_vm_exec_control &
+			    CPU_BASED_CR8_LOAD_EXITING)
+				return 1;
+			/*
+			 * TODO: missing else if control & CPU_BASED_TPR_SHADOW
+			 * then set tpr shadow and if below tpr_threshold, exit.
+			 */
+			break;
+		}
+		break;
+	case 2: /* clts */
+		if (vmcs12->cr0_guest_host_mask & X86_CR0_TS)
+			return 1;
+		break;
+	case 1: /* mov from cr */
+		switch (cr) {
+		case 0:
+			return 1;	/* CR0 reads: conservatively reflect to L1 */
+		case 3:
+			if (vmcs12->cpu_based_vm_exec_control &
+			    CPU_BASED_CR3_STORE_EXITING)
+				return 1;
+			break;
+		case 4:
+			/* like CR0 reads: conservatively reflect to L1 */
+			return 1;
+		case 8:
+			if (vmcs12->cpu_based_vm_exec_control &
+			    CPU_BASED_CR8_STORE_EXITING)
+				return 1;
+			break;
+		}
+		break;
+	case 3: /* lmsw */
+		/*
+		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+		 * cr0. Other attempted changes are ignored, with no exit.
+		 */
+		if (vmcs12->cr0_guest_host_mask & 0xe &
+		    (val ^ vmcs12->cr0_read_shadow))
+			return 1;
+		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
+		    !(vmcs12->cr0_read_shadow & 0x1) &&
+		    (val & 0x1))
+			return 1;
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue L2). Only call this
+ * when in nested_mode (L2).
+ */
+static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+{
+	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs_fields *vmcs12 = get_vmcs12_fields(vcpu);
+
+	if (vmx->nested.nested_run_pending)
+		return 0; /* L2 must get to run before we may exit to L1 */
+
+	if (unlikely(vmx->fail)) {
+		printk(KERN_INFO "%s failed vm entry %x\n",
+		       __func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+		return 1; /* entry failure: let L1 see and handle it */
+	}
+
+	switch (exit_reason) {
+	case EXIT_REASON_EXTERNAL_INTERRUPT:
+		return 0; /* external interrupts are always handled by L0 */
+	case EXIT_REASON_EXCEPTION_NMI:
+		if (!is_exception(intr_info))
+			return 0; /* NMIs and other non-exception events: L0 */
+		else if (is_page_fault(intr_info) && (!enable_ept))
+			return 0; /* #PF under shadow paging: L0's mmu handles it */
+		return (vmcs12->exception_bitmap &
+			(1u << (intr_info & INTR_INFO_VECTOR_MASK))); /* exit iff L1 intercepts this vector */
+	case EXIT_REASON_EPT_VIOLATION:
+		return 0; /* NOTE(review): assumes EPT is not exposed to L1 - confirm */
+	case EXIT_REASON_INVLPG:
+		return (vmcs12->cpu_based_vm_exec_control &
+			CPU_BASED_INVLPG_EXITING);
+	case EXIT_REASON_MSR_READ:
+	case EXIT_REASON_MSR_WRITE:
+		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+	case EXIT_REASON_CR_ACCESS:
+		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+	case EXIT_REASON_DR_ACCESS:
+		return (vmcs12->cpu_based_vm_exec_control &
+			CPU_BASED_MOV_DR_EXITING);
+	default:
+		/*
+		 * One particularly interesting case that is covered here is an
+		 * exit caused by L2 running a VMX instruction. L2 is guest
+		 * mode in L1's world, and according to the VMX spec running a
+		 * VMX instruction in guest mode should cause an exit to root
+		 * mode, i.e., to L1. This is why we need to return r=1 for
+		 * those exit reasons too. This enables further nesting: Like
+		 * L0 emulates VMX for L1, we now allow L1 to emulate VMX for
+		 * L2, who will then be able to run L3.
+		 */
+		return 1;
+	}
+}
+
+/*
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
*/
@@ -4909,6 +5102,17 @@ static int vmx_handle_exit(struct kvm_vc
if (vmx->emulation_required && emulate_invalid_guest_state)
return handle_invalid_guest_state(vcpu);
+ if (exit_reason == EXIT_REASON_VMLAUNCH ||
+ exit_reason == EXIT_REASON_VMRESUME)
+ vmx->nested.nested_run_pending = 1;
+ else
+ vmx->nested.nested_run_pending = 0;
+
+ if (vmx->nested.nested_mode && nested_vmx_exit_handled(vcpu)) {
+ nested_vmx_vmexit(vcpu, false);
+ return 1;
+ }
+
/* Access CR3 don't cause VMExit in paging mode, so we need
* to sync with guest real CR3. */
if (enable_ept && is_paging(vcpu))
@@ -5960,6 +6164,7 @@ static int nested_vmx_run(struct kvm_vcp
r = kvm_mmu_load(vcpu);
if (unlikely(r)) {
printk(KERN_ERR "Error in kvm_mmu_load r %d\n", r);
+ nested_vmx_vmexit(vcpu, false);
nested_vmx_failValid(vcpu,
VMXERR_VMRESUME_CORRUPTED_VMCS /* ? */);
/* switch back to L1 */