@@ -24,6 +24,7 @@
#include "irq.h"
#include "i8254.h"
+#include "x86.h"
/*
* check if there are pending timer events
@@ -48,6 +49,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
struct kvm_pic *s;
+ if (!irqchip_in_kernel(v->kvm))
+ return v->arch.irq_summary;
+
if (kvm_apic_has_interrupt(v) == -1) { /* LAPIC */
if (kvm_apic_accept_pic_intr(v)) {
s = pic_irqchip(v->kvm); /* PIC */
@@ -67,6 +71,9 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
struct kvm_pic *s;
int vector;
+ if (!irqchip_in_kernel(v->kvm))
+ return kvm_pop_irq(v);
+
vector = kvm_get_apic_interrupt(v); /* APIC */
if (vector == -1) {
if (kvm_apic_accept_pic_intr(v)) {
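The two irq.c hooks above give the rest of the code a single interface for "is an interrupt pending" and "fetch the next vector", whether the irqchip is emulated in the kernel or in userspace. A minimal sketch of the intended consumer pattern follows; the helper name is made up for illustration, while kvm_queue_interrupt() and the interrupt.pending flag are the ones used by the VMX hunk further down.

/*
 * Illustrative only: how the later hunks use the two helpers above.
 * example_inject_pending_irq() is a made-up name, not part of the patch.
 */
static void example_inject_pending_irq(struct kvm_vcpu *vcpu)
{
	/* Nothing queued yet and something is pending? Pull it in. */
	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu))
		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
}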
@@ -2091,8 +2091,9 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
* If the user space waits to inject interrupts, exit as soon as
* possible
*/
- if (kvm_run->request_interrupt_window &&
- !svm->vcpu.arch.irq_summary) {
+ if (!irqchip_in_kernel(svm->vcpu.kvm) &&
+ kvm_run->request_interrupt_window &&
+ !kvm_cpu_has_interrupt(&svm->vcpu)) {
++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
@@ -2373,7 +2374,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
(svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
(svm->vcpu.arch.hflags & HF_GIF_MASK));
- if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
+ if (svm->vcpu.arch.interrupt_window_open &&
+ kvm_cpu_has_interrupt(&svm->vcpu))
/*
* If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
@@ -2383,7 +2385,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
* Interrupts blocked. Wait for unblock.
*/
if (!svm->vcpu.arch.interrupt_window_open &&
- (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
+ (kvm_cpu_has_interrupt(&svm->vcpu) ||
+ kvm_run->request_interrupt_window))
svm_set_vintr(svm);
else
svm_clear_vintr(svm);
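The SVM hunks above choose between injecting right away and arming a virtual interrupt (V_IRQ) request so the guest exits once it can take the interrupt. A condensed restatement of the condition that now gates svm_set_vintr(), with an invented helper name purely for illustration:

/*
 * Illustrative summary of the svm_set_vintr()/svm_clear_vintr() choice
 * above; example_svm_wants_vintr() is not a function in the patch.
 */
static bool example_svm_wants_vintr(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	/* Window closed but something wants in: ask for a V_IRQ exit. */
	return !svm->vcpu.arch.interrupt_window_open &&
	       (kvm_cpu_has_interrupt(&svm->vcpu) ||
		kvm_run->request_interrupt_window);
}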
@@ -2535,21 +2535,20 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
vmx_inject_nmi(vcpu);
if (vcpu->arch.nmi_pending)
enable_nmi_window(vcpu);
- else if (vcpu->arch.irq_summary
- || kvm_run->request_interrupt_window)
+ else if (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_run->request_interrupt_window)
enable_irq_window(vcpu);
return;
}
if (vcpu->arch.interrupt_window_open) {
- if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
- kvm_queue_interrupt(vcpu, kvm_pop_irq(vcpu));
+ if (kvm_cpu_has_interrupt(vcpu) && !vcpu->arch.interrupt.pending)
+ kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
if (vcpu->arch.interrupt.pending)
vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
- }
- if (!vcpu->arch.interrupt_window_open &&
- (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
+ } else if (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_run->request_interrupt_window)
enable_irq_window(vcpu);
}
@@ -2976,8 +2975,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
* If the user space waits to inject interrupts, exit as soon as
* possible
*/
- if (kvm_run->request_interrupt_window &&
- !vcpu->arch.irq_summary) {
+ if (!irqchip_in_kernel(vcpu->kvm) &&
+ kvm_run->request_interrupt_window &&
+ !kvm_cpu_has_interrupt(vcpu)) {
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
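Both interrupt-window handlers (SVM in interrupt_window_interception() above and VMX here) now exit to userspace only when userspace actually owns the irqchip; with an in-kernel irqchip userspace never injects through this path, so there is no reason to bounce out. A one-function restatement of the new test, with an illustrative helper name that is not part of the patch:

/*
 * Illustrative only: the common shape of the new exit-to-userspace test.
 */
static bool example_should_exit_for_userspace_irq(struct kvm_vcpu *vcpu,
						  struct kvm_run *kvm_run)
{
	return !irqchip_in_kernel(vcpu->kvm) &&
	       kvm_run->request_interrupt_window &&
	       !kvm_cpu_has_interrupt(vcpu);
}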
@@ -3065,7 +3065,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
- return (!vcpu->arch.irq_summary &&
+ return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
kvm_run->request_interrupt_window &&
vcpu->arch.interrupt_window_open &&
(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
@@ -3082,7 +3082,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
else
kvm_run->ready_for_interrupt_injection =
(vcpu->arch.interrupt_window_open &&
- vcpu->arch.irq_summary == 0);
+ !kvm_cpu_has_interrupt(vcpu));
}
static void vapic_enter(struct kvm_vcpu *vcpu)
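For context, the request_interrupt_window and ready_for_interrupt_injection fields touched above serve the userspace-irqchip injection protocol. A minimal userspace-side sketch, assuming a plain KVM_RUN loop and the standard KVM_INTERRUPT ioctl; error handling is elided and everything except the ioctls and kvm_run fields is illustrative.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative VMM snippet, not part of the patch. */
static void example_run_and_maybe_inject(int vcpu_fd, struct kvm_run *run,
					 int pending_irq)
{
	/* Ask for an exit as soon as the guest can take an interrupt. */
	run->request_interrupt_window = (pending_irq >= 0);

	ioctl(vcpu_fd, KVM_RUN, 0);

	/* post_kvm_run_save() above now decides this via kvm_cpu_has_interrupt(). */
	if (pending_irq >= 0 && run->ready_for_interrupt_injection) {
		struct kvm_interrupt intr = { .irq = pending_irq };

		ioctl(vcpu_fd, KVM_INTERRUPT, &intr);
	}
}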