@@ -333,6 +333,8 @@ struct nested_vmx {
struct vmcs_fields *vmcs01_fields;
/* Saving some vcpu->arch.* data we had for L1, while running L2 */
unsigned long l1_arch_cr3;
+ /* L2 must run next; we must not decide to exit to L1 until it has run. */
+ bool nested_run_pending;
};
struct vcpu_vmx {
@@ -845,6 +847,20 @@ static inline bool nested_vm_need_virtua
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}
+static inline bool nested_cpu_has_vmx_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+ return get_vmcs12_fields(vcpu)->cpu_based_vm_exec_control &
+ CPU_BASED_USE_MSR_BITMAPS;
+}
+
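+/* True iff intr_info denotes a valid hardware exception (event type 3) */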
+static inline bool is_exception(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static int nested_vmx_vmexit(struct kvm_vcpu *vcpu, bool is_interrupt);
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -4894,6 +4910,195 @@ static int (*kvm_vmx_exit_handlers[])(st
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check in L1's MSR bitmap
+ * whether L1 expressed interest in the current event (a read or write of
+ * a specific MSR).
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ struct vmcs_fields *vmcs12, u32 exit_reason)
+{
+ u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ struct page *msr_bitmap_page;
+ void *va;
+ int offset = 0;
+ bool ret;
+
+ if (!cpu_has_vmx_msr_bitmap() || !nested_cpu_has_vmx_msr_bitmap(vcpu))
+ return 1;
+
+ /*
+ * The MSR bitmap page holds four 1024-byte bitmaps: reads of the low
+ * MSRs (0x00000000..0x00001fff) at offset 0x0, reads of the high MSRs
+ * (0xc0000000..0xc0001fff) at 0x400, and the corresponding write
+ * bitmaps at 0x800 and 0xc00. Pick the right quadrant, and check the
+ * index before mapping the page, so that an out-of-range MSR cannot
+ * leak the atomic kmap below.
+ */
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ offset += 0x800;
+ if (msr_index >= 0xc0000000) {
+ msr_index -= 0xc0000000;
+ offset += 0x400;
+ }
+ if (msr_index > 0x1fff)
+ return 0;
+
+ msr_bitmap_page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+ if (!msr_bitmap_page) {
+ printk(KERN_INFO "%s error in nested_get_page\n", __func__);
+ return 0;
+ }
+
+ va = kmap_atomic(msr_bitmap_page, KM_USER1);
+ ret = test_bit(msr_index, va + offset);
+ kunmap_atomic(va, KM_USER1);
+ kvm_release_page_clean(msr_bitmap_page);
+ return ret;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ struct vmcs_fields *vmcs12)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
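+ /*
+ * Exit qualification for CR accesses (see SDM): bits 3:0 are the CR
+ * number, bits 5:4 the access type (0 = mov to CR, 1 = mov from CR,
+ * 2 = clts, 3 = lmsw), bits 11:8 the GPR involved, and for lmsw
+ * bits 31:16 hold the source operand.
+ */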
+ int cr = exit_qualification & 15;
+ int reg = (exit_qualification >> 8) & 15;
+ unsigned long val = kvm_register_read(vcpu, reg);
+
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
+ switch (cr) {
+ case 0:
+ if (vmcs12->cr0_guest_host_mask &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ break;
+ case 3:
+ if ((vmcs12->cr3_target_count >= 1 &&
+ vmcs12->cr3_target_value0 == val) ||
+ (vmcs12->cr3_target_count >= 2 &&
+ vmcs12->cr3_target_value1 == val) ||
+ (vmcs12->cr3_target_count >= 3 &&
+ vmcs12->cr3_target_value2 == val) ||
+ (vmcs12->cr3_target_count >= 4 &&
+ vmcs12->cr3_target_value3 == val))
+ return 0;
+ /* CR3-load exiting is a primary, not secondary, control */
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR3_LOAD_EXITING)
+ return 1;
+ break;
+ case 4:
+ if (vmcs12->cr4_guest_host_mask &
+ (vmcs12->cr4_read_shadow ^ val))
+ return 1;
+ break;
+ case 8:
+ /* CR8-load exiting is a primary, not secondary, control */
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR8_LOAD_EXITING)
+ return 1;
+ /*
+ * TODO: missing else if control & CPU_BASED_TPR_SHADOW
+ * then set tpr shadow and if below tpr_threshold, exit.
+ */
+ break;
+ }
+ break;
+ case 2: /* clts */
+ if (vmcs12->cr0_guest_host_mask & X86_CR0_TS)
+ return 1;
+ break;
+ case 1: /* mov from cr */
+ switch (cr) {
+ case 0:
+ return 1;
+ case 3:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR3_STORE_EXITING)
+ return 1;
+ break;
+ case 4:
+ return 1;
+ case 8:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR8_STORE_EXITING)
+ return 1;
+ break;
+ }
+ break;
+ case 3: /* lmsw */
+ /*
+ * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+ * cr0. Other attempted changes are ignored, with no exit.
+ * Note that the lmsw source operand comes from bits 31:16
+ * of the exit qualification, not from a register.
+ */
+ val = (exit_qualification >> 16) & 0x0f;
+ if (vmcs12->cr0_guest_host_mask & 0xe &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ if ((vmcs12->cr0_guest_host_mask & 0x1) &&
+ !(vmcs12->cr0_read_shadow & 0x1) &&
+ (val & 0x1))
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue running L2). Only
+ * call this while in guest mode (L2), i.e., when is_guest_mode(vcpu).
+ */
+static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+{
+ u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+ u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs_fields *vmcs12 = get_vmcs12_fields(vcpu);
+
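+ /*
+ * L2 must run next (nested_run_pending); don't decide to exit
+ * to L1 before it has actually run.
+ */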
+ if (vmx->nested.nested_run_pending)
+ return 0;
+
+ if (unlikely(vmx->fail)) {
+ printk(KERN_INFO "%s failed vm entry %x\n",
+ __func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+ return 1;
+ }
+
+ switch (exit_reason) {
+ case EXIT_REASON_EXTERNAL_INTERRUPT:
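+ /*
+ * External interrupts are always handled by L0. If L1 should
+ * see the interrupt instead, the injection path switches to L1
+ * (nested_vmx_vmexit with is_interrupt == true).
+ */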
+ return 0;
+ case EXIT_REASON_EXCEPTION_NMI:
+ if (!is_exception(intr_info))
+ return 0;
+ /*
+ * When L0 uses EPT, it does not intercept #PF for its own
+ * needs, so a page-fault exit must have been requested by L1.
+ * With shadow paging, L0 has to handle L2's page faults first.
+ */
+ if (is_page_fault(intr_info))
+ return enable_ept;
+ return vmcs12->exception_bitmap &
+ (1u << (intr_info & INTR_INFO_VECTOR_MASK));
+ case EXIT_REASON_EPT_VIOLATION:
+ return 0;
+ case EXIT_REASON_INVLPG:
+ return vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_INVLPG_EXITING;
+ case EXIT_REASON_MSR_READ:
+ case EXIT_REASON_MSR_WRITE:
+ return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+ case EXIT_REASON_CR_ACCESS:
+ return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+ case EXIT_REASON_DR_ACCESS:
+ return vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_MOV_DR_EXITING;
+ default:
+ /*
+ * One particularly interesting case covered here is an exit
+ * caused by L2 running a VMX instruction. L2 is in guest mode
+ * from L1's point of view, and according to the VMX spec a VMX
+ * instruction executed in guest mode causes an exit to root
+ * mode, i.e., to L1. This is why we return 1 for these exit
+ * reasons too. It also enables further nesting: just as L0
+ * emulates VMX for L1, L1 can now emulate VMX for L2, which
+ * can then run L3.
+ */
+ return 1;
+ }
+}
+
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
*info1 = vmcs_readl(EXIT_QUALIFICATION);
@@ -4921,6 +5126,17 @@ static int vmx_handle_exit(struct kvm_vc
if (enable_ept && is_paging(vcpu))
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
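+ /*
+ * If this exit is because L1 executed VMLAUNCH/VMRESUME, the entry
+ * into L2 that we are about to emulate must complete before we may
+ * exit to L1 again; any other exit means L2 did run, so clear the
+ * flag.
+ */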
+ if (exit_reason == EXIT_REASON_VMLAUNCH ||
+ exit_reason == EXIT_REASON_VMRESUME)
+ vmx->nested.nested_run_pending = true;
+ else
+ vmx->nested.nested_run_pending = false;
+
+ if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
+ nested_vmx_vmexit(vcpu, false);
+ return 1;
+ }
+
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -5981,6 +6197,7 @@ static int nested_vmx_run(struct kvm_vcp
kvm_mmu_reset_context(vcpu);
if (unlikely(kvm_mmu_load(vcpu))) {
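+ /* without L2's page tables we cannot run it; go back to L1 */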
+ nested_vmx_vmexit(vcpu, false);
nested_vmx_failValid(vcpu,
VMXERR_VMRESUME_CORRUPTED_VMCS /* ? */);
/* switch back to L1 */