Message ID | 1359549372-4764-1-git-send-email-yang.z.zhang@intel.com (mailing list archive)
---|---
State | New, archived |
On Wed, Jan 30, 2013 at 08:36:12PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@Intel.com>
>
> The "acknowledge interrupt on exit" feature controls processor behavior
> for external interrupt acknowledgement. When this control is set, the
> processor acknowledges the interrupt controller to acquire the
> interrupt vector on VM exit.
>
> After enabling this feature, an interrupt that arrives while the target
> cpu is running in vmx non-root mode is handled by the vmx handler
> instead of by a handler in the idt. Currently, the vmx handler only
> fakes an interrupt stack and jumps to the idt table to let the real
> handler handle it. Further, we will recognize the interrupt and deliver
> through the idt table only those interrupts that do not belong to the
> current vcpu; interrupts that belong to the current vcpu will be
> handled inside the vmx handler. This will reduce KVM's
> interrupt-handling cost.
>
> Also, the interrupt-enable logic changes when this feature is turned on:
> before this patch, the hypervisor called local_irq_enable() to enable
> interrupts directly. Now the IF bit is set on the interrupt stack frame,
> and interrupts are enabled on a return from the interrupt handler if an
> external interrupt exists. If there is no external interrupt, we still
> call local_irq_enable().
>
> Refer to Intel SDM volume 3, chapter 33.2.
>
Looks good to me except one comment below. Send this patch as part of
the posted interrupt series; there is no point in applying it separately.

> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/svm.c              |    6 +++
>  arch/x86/kvm/vmx.c              |   70 ++++++++++++++++++++++++++++++++++++--
>  arch/x86/kvm/x86.c              |    4 ++-
>  4 files changed, 76 insertions(+), 5 deletions(-)
>
> [...]
>
> @@ -3742,11 +3744,12 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
>   * Note that host-state that does change is set elsewhere. E.g., host-state
>   * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
>   */
> -static void vmx_set_constant_host_state(void)
> +static void vmx_set_constant_host_state(struct kvm_vcpu *vcpu)
Pass vmx to the function. No need to convert vmx to vcpu and back.

> [... remainder of the patch trimmed; the full diff appears at the end of this page ...]

--
			Gleb.
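As background for the IDT indexing done in vmx_handle_external_intr(): an x86-64 gate descriptor is 16 bytes (8 bytes on 32-bit), and the handler entry point is scattered across three offset fields of the gate. The following self-contained sketch (not from the patch; the struct and function names are invented for illustration) shows the arithmetic behind "vector * 16" and what the kernel's gate_offset() conceptually computes:

```c
#include <stdint.h>

/* 64-bit IDT gate descriptor layout (Intel SDM Vol. 3, Sec. 6.14.1).
 * Each entry is 16 bytes, hence desc = idt_base + vector * 16. */
struct idt_gate64 {
	uint16_t offset_low;   /* handler offset bits 15:0  */
	uint16_t selector;     /* code segment selector     */
	uint16_t flags;        /* IST, type, DPL, present   */
	uint16_t offset_mid;   /* handler offset bits 31:16 */
	uint32_t offset_high;  /* handler offset bits 63:32 */
	uint32_t reserved;
};

/* Hypothetical 64-bit equivalent of the kernel's gate_offset():
 * reassemble the handler address from the three offset fields. */
static uint64_t gate_entry(uint64_t idt_base, unsigned int vector)
{
	const struct idt_gate64 *desc =
		(const struct idt_gate64 *)(idt_base + vector * 16);

	return (uint64_t)desc->offset_low |
	       ((uint64_t)desc->offset_mid << 16) |
	       ((uint64_t)desc->offset_high << 32);
}
```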
Gleb Natapov wrote on 2013-01-30:
> On Wed, Jan 30, 2013 at 08:36:12PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zhang@Intel.com>
>>
>> [... commit message trimmed ...]
>>
> Looks good to me except one comment below. Send this patch as part of
> the posted interrupt series; there is no point in applying it separately.
Sure. I will send out the PI patch after it passes all testing.

> [... quoted patch and inline comment trimmed ...]

Best regards,
Yang
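For reference, with "acknowledge interrupt on exit" enabled, VM_EXIT_INTR_INFO carries the vector and interruption type of the acknowledged interrupt, and the guard at the top of vmx_handle_external_intr() simply checks the valid bit and the type field. A minimal standalone sketch of that decode; the mask values mirror arch/x86/include/asm/vmx.h and are reproduced here so the snippet compiles on its own:

```c
#include <stdint.h>

#define INTR_INFO_VECTOR_MASK     0xffU       /* vector, bits 7:0            */
#define INTR_INFO_INTR_TYPE_MASK  0x700U      /* interruption type, bits 10:8 */
#define INTR_TYPE_EXT_INTR        (0U << 8)   /* type 0: external interrupt  */
#define INTR_INFO_VALID_MASK      (1U << 31)  /* bit 31: info field is valid */

/* Return the vector if the exit was caused by a valid external
 * interrupt, -1 otherwise: the same test the patch performs before
 * faking the interrupt stack frame. */
static int external_intr_vector(uint32_t exit_intr_info)
{
	uint32_t mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;

	if ((exit_intr_info & mask) !=
	    (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR))
		return -1;
	return exit_intr_info & INTR_INFO_VECTOR_MASK;
}
```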
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 77d56a4..1f1b2f8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -725,6 +725,7 @@ struct kvm_x86_ops {
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage);
+	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d29d3cd..c283185 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4227,6 +4227,11 @@ out:
 	return ret;
 }
 
+static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+	local_irq_enable();
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -4318,6 +4323,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tdp_cr3 = set_tdp_cr3,
 
 	.check_intercept = svm_check_intercept,
+	.handle_external_intr = svm_handle_external_intr,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02eeba8..eaef185 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -379,6 +379,7 @@ struct vcpu_vmx {
 	struct shared_msr_entry *guest_msrs;
 	int                   nmsrs;
 	int                   save_nmsrs;
+	unsigned long         host_idt_base;
 #ifdef CONFIG_X86_64
 	u64                   msr_host_kernel_gs_base;
 	u64                   msr_guest_kernel_gs_base;
@@ -2565,7 +2566,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 #ifdef CONFIG_X86_64
 	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
 #endif
-	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
+	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
+		VM_EXIT_ACK_INTR_ON_EXIT;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
 				&_vmexit_control) < 0)
 		return -EIO;
@@ -3742,11 +3744,12 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  * Note that host-state that does change is set elsewhere. E.g., host-state
  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  */
-static void vmx_set_constant_host_state(void)
+static void vmx_set_constant_host_state(struct kvm_vcpu *vcpu)
 {
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
 	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
@@ -3770,6 +3773,7 @@ static void vmx_set_constant_host_state(void)
 
 	native_store_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
+	vmx->host_idt_base = dt.address;
 
 	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
 
@@ -3884,7 +3888,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(&vmx->vcpu);
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -6094,6 +6098,63 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 	}
 }
 
+static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+	/*
+	 * If external interrupt exists, IF bit is set in rflags/eflags on the
+	 * interrupt stack frame, and interrupt will be enabled on a return
+	 * from interrupt handler.
+	 */
+	if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
+			== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
+		unsigned int vector;
+		unsigned long entry;
+		gate_desc *desc;
+		struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+#ifdef CONFIG_X86_64
+		desc = (void *)vmx->host_idt_base + vector * 16;
+#else
+		desc = (void *)vmx->host_idt_base + vector * 8;
+#endif
+
+		entry = gate_offset(*desc);
+		asm(
+			"mov %0, %%" _ASM_DX " \n\t"
+#ifdef CONFIG_X86_64
+			"mov %%" _ASM_SP ", %%" _ASM_BX " \n\t"
+			"and $0xfffffffffffffff0, %%" _ASM_SP " \n\t"
+			"mov %%ss, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"push %%" _ASM_BX " \n\t"
+#endif
+			"pushf \n\t"
+			"pop %%" _ASM_AX " \n\t"
+			"or $0x200, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"mov %%cs, %%" _ASM_AX " \n\t"
+			"push %%" _ASM_AX " \n\t"
+			"push intr_return \n\t"
+			"jmp *%%" _ASM_DX " \n\t"
+			"1: \n\t"
+			".pushsection .rodata \n\t"
+			".global intr_return \n\t"
+			"intr_return: " _ASM_PTR " 1b \n\t"
+			".popsection \n\t"
+			: : "m"(entry) :
+#ifdef CONFIG_X86_64
+			"rax", "rbx", "rdx"
+#else
+			"eax", "edx"
+#endif
+			);
+	} else
+		local_irq_enable();
+}
+
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -6764,7 +6825,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Other fields are different per CPU, and will be set later when
 	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
 	 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vcpu);
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
@@ -7361,6 +7422,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tdp_cr3 = vmx_set_cr3,
 
 	.check_intercept = vmx_check_intercept,
+	.handle_external_intr = vmx_handle_external_intr,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9f5529..e019437 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5767,7 +5767,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
-	local_irq_enable();
+
+	/* Interrupt is enabled by handle_external_intr() */
+	kvm_x86_ops->handle_external_intr(vcpu);
 
 	++vcpu->stat.exits;
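A note on the inline assembly above: it builds the iret frame by hand (SS and the saved stack pointer on 64-bit, then RFLAGS with IF forced on, CS, and a return address materialized through a .rodata label) and then jumps to the IDT entry. A possible simplification, sketched below for the 64-bit case only, is to let the call instruction push the return address, which removes both the label and the manual push. This is a hedged sketch assuming kernel context (__KERNEL_CS and __KERNEL_DS from asm/segment.h), not the patch as posted:

```c
/* Sketch: dispatch an already-acknowledged host vector by faking the
 * frame that the handler's iret will pop (SS, RSP, RFLAGS, CS, RIP,
 * from higher to lower addresses). */
static void dispatch_host_vector(unsigned long entry)
{
	unsigned long tmp;

	asm volatile(
		"mov %%rsp, %[sp]\n\t"
		"and $0xfffffffffffffff0, %%rsp\n\t"	/* 16-byte align */
		"push $%c[ss]\n\t"			/* SS            */
		"push %[sp]\n\t"			/* original RSP  */
		"pushf\n\t"				/* RFLAGS        */
		"orl $0x200, (%%rsp)\n\t"		/* force IF on   */
		"push $%c[cs]\n\t"			/* CS            */
		"call *%[entry]\n\t"			/* pushes RIP    */
		: [sp] "=&r" (tmp)
		: [entry] "r" (entry),
		  [ss] "i" (__KERNEL_DS),
		  [cs] "i" (__KERNEL_CS)
		: "cc", "memory");
}
```

Because iret pops SS:RSP from the frame, the stack pointer comes back as its pre-alignment value, and the forced IF bit means interrupts are re-enabled at exactly the moment the handler returns, which is the behavior change the commit message describes.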