@@ -188,6 +188,7 @@ enum {
 enum exit_fastpath_completion {
 	EXIT_FASTPATH_NONE,
 	EXIT_FASTPATH_SKIP_EMUL_INS,
+	EXIT_FASTPATH_CONT_RUN,
 };
 
 struct x86_emulate_ctxt;
@@ -6569,6 +6569,20 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 	}
 }
 
+static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+	if (!is_guest_mode(vcpu)) {
+		switch (to_vmx(vcpu)->exit_reason) {
+		case EXIT_REASON_MSR_WRITE:
+			return handle_fastpath_set_msr_irqoff(vcpu);
+		default:
+			return EXIT_FASTPATH_NONE;
+		}
+	}
+
+	return EXIT_FASTPATH_NONE;
+}
+
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -6577,6 +6591,7 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
+cont_run:
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6743,17 +6758,16 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
 		return EXIT_FASTPATH_NONE;
 
-	if (!is_guest_mode(vcpu) && vmx->exit_reason == EXIT_REASON_MSR_WRITE)
-		exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
-	else
-		exit_fastpath = EXIT_FASTPATH_NONE;
-
 	vmx->loaded_vmcs->launched = 1;
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
+	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
+		goto cont_run;
+
 	return exit_fastpath;
 }
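
To make the control flow easier to follow outside the kernel tree, here is a minimal standalone sketch of the pattern the diff introduces: the fastpath handler runs right after VM-exit with interrupts still off, and an EXIT_FASTPATH_CONT_RUN result jumps back to the cont_run: label so the guest is re-entered without going through the full exit handling. This is a simplified illustration under stated assumptions, not kernel code; every name except the exit_fastpath_completion values is a hypothetical stand-in for the real KVM/VMX functions.

/*
 * Standalone sketch of the cont_run loop shape. Names such as mock_vcpu,
 * mock_fastpath() and mock_vcpu_run() are invented for illustration.
 */
#include <stdio.h>

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_SKIP_EMUL_INS,
	EXIT_FASTPATH_CONT_RUN,
};

struct mock_vcpu {
	int fast_exits_left;	/* exits the fastpath can absorb before bailing out */
};

/* Stand-in for vmx_exit_handlers_fastpath()/handle_fastpath_set_msr_irqoff(). */
static enum exit_fastpath_completion mock_fastpath(struct mock_vcpu *vcpu)
{
	if (vcpu->fast_exits_left > 0) {
		vcpu->fast_exits_left--;
		return EXIT_FASTPATH_CONT_RUN;	/* re-enter the guest immediately */
	}
	return EXIT_FASTPATH_NONE;		/* fall back to the full exit path */
}

/* Stand-in for vmx_vcpu_run(): same goto-based re-entry loop as the patch. */
static enum exit_fastpath_completion mock_vcpu_run(struct mock_vcpu *vcpu)
{
	enum exit_fastpath_completion exit_fastpath;

cont_run:
	printf("enter guest\n");		/* the real VM entry happens here */

	exit_fastpath = mock_fastpath(vcpu);
	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
		goto cont_run;

	return exit_fastpath;
}

int main(void)
{
	struct mock_vcpu vcpu = { .fast_exits_left = 3 };

	mock_vcpu_run(&vcpu);	/* prints "enter guest" four times: 3 fast re-entries + 1 */
	return 0;
}

Note also that the diff moves the fastpath dispatch to after vmx_recover_nmi_blocking() and vmx_complete_interrupts(), so the usual post-exit bookkeeping has completed before the handler decides whether to loop back to guest entry.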