Message ID | 1242636246-11547-2-git-send-email-gleb@redhat.com (mailing list archive)
---|---
State | New, archived
Gleb Natapov wrote:
> @@ -3174,10 +3174,11 @@ static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  			vcpu->arch.nmi_injected = true;
>  			kvm_x86_ops->set_nmi(vcpu);
>  		}
> -	} else if (kvm_cpu_has_interrupt(vcpu)) {
> -		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
> -			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
> -					    false);
> +	} else if (kvm_x86_ops->interrupt_allowed(vcpu)) {
> +		int vec = kvm_cpu_get_interrupt(vcpu);
> +		if (vec != -1) {
> +			printk(KERN_ERR"inject %d rip=%x\n", vec, kvm_rip_read(vcpu));
> +			kvm_queue_interrupt(vcpu, vec, false);
>  			kvm_x86_ops->set_irq(vcpu);
>  		}
>  	}

Not sure if this is a win.  Usually interrupts are allowed, so we'll have
to do both calculations.  On vmx ->interrupt_allowed() reads the vmcs,
which is slow.
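To make the trade-off concrete, the following stand-alone toy program (not KVM code; the *_stub helpers and counters are invented for illustration) counts how often each ordering pays for the expensive operations, a VMCS read versus an IRR scan, under the assumption that interrupts are usually allowed:

/*
 * Toy model (NOT KVM code) of the two orderings being debated.  The stub
 * names below are invented; they only count how often the expensive
 * operations would run.
 */
#include <stdbool.h>
#include <stdio.h>

static int vmcs_reads;  /* stands in for ->interrupt_allowed() on VMX */
static int irr_scans;   /* stands in for kvm_cpu_has/get_interrupt()  */

static bool interrupt_allowed_stub(void)
{
    vmcs_reads++;        /* models a slow VMCS read */
    return true;         /* "usually interrupts are allowed" */
}

static bool has_interrupt_stub(bool pending)
{
    irr_scans++;         /* models scanning the IRR */
    return pending;
}

static int get_interrupt_stub(bool pending)
{
    irr_scans++;         /* models scanning the IRR again */
    return pending ? 32 : -1;
}

/* Old ordering: cheap pending check first, VMCS read only when pending. */
static void old_order(bool pending)
{
    if (has_interrupt_stub(pending) && interrupt_allowed_stub())
        get_interrupt_stub(pending);
}

/* New ordering (the patch): VMCS read first, then a single IRR scan. */
static void new_order(bool pending)
{
    if (interrupt_allowed_stub())
        get_interrupt_stub(pending);
}

static void report(const char *name, void (*fn)(bool), bool pending)
{
    vmcs_reads = irr_scans = 0;
    fn(pending);
    printf("%s, pending=%d: vmcs reads=%d, irr scans=%d\n",
           name, pending, vmcs_reads, irr_scans);
}

int main(void)
{
    report("old", old_order, false);   /* old wins: no VMCS read */
    report("new", new_order, false);
    report("old", old_order, true);    /* new wins: single IRR scan */
    report("new", new_order, true);
    return 0;
}

With nothing pending (the common case Avi has in mind) the old ordering skips the VMCS read entirely, while the reordered code always pays for it; with an interrupt pending, the old ordering scans the IRR twice, which is the saving Gleb points to in his reply below.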
On Mon, May 18, 2009 at 12:00:40PM +0300, Avi Kivity wrote:
> Gleb Natapov wrote:
>> @@ -3174,10 +3174,11 @@ static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>>  			vcpu->arch.nmi_injected = true;
>>  			kvm_x86_ops->set_nmi(vcpu);
>>  		}
>> -	} else if (kvm_cpu_has_interrupt(vcpu)) {
>> -		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
>> -			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
>> -					    false);
>> +	} else if (kvm_x86_ops->interrupt_allowed(vcpu)) {
>> +		int vec = kvm_cpu_get_interrupt(vcpu);
>> +		if (vec != -1) {
>> +			printk(KERN_ERR"inject %d rip=%x\n", vec, kvm_rip_read(vcpu));
>> +			kvm_queue_interrupt(vcpu, vec, false);
>>  			kvm_x86_ops->set_irq(vcpu);
>>  		}
>>  	}
>>
> Not sure if this is a win.  Usually interrupts are allowed, so we'll
> have to do both calculations.  On vmx ->interrupt_allowed() reads the
> vmcs, which is slow.
>
That depends on how slow a vmcs read is.  When an interrupt is available,
the current code also scans the IRR twice: once in kvm_cpu_has_interrupt()
and again in kvm_cpu_get_interrupt().  If we want to optimize vmcs access
(and for nested virtualization we may want to), we can read the common
fields on exit and use an in-memory copy.

--
			Gleb.
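Gleb's closing idea, reading the commonly used VMCS fields once at VM exit and answering later queries from memory, could look roughly like the sketch below. This is a hypothetical illustration only: the vmx_cached_state structure, vmcs_readl_stub() and the choice of GUEST_RFLAGS as the cached field are invented for the example, not taken from KVM.

/*
 * Hypothetical sketch of caching a "common" VMCS field in memory so that
 * repeated queries between two VM exits cost a memory load instead of a
 * VMREAD.  Everything here is invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

/* Stub standing in for a real vmcs_readl(); pretend it issues a VMREAD. */
static uint64_t vmcs_readl_stub(uint32_t field)
{
    (void)field;
    return 0x2;             /* dummy RFLAGS value (reserved bit 1 set) */
}

#define GUEST_RFLAGS_FIELD  0x6820  /* VMCS encoding of GUEST_RFLAGS */

struct vmx_cached_state {
    uint64_t rflags;
    bool     rflags_valid;
};

/* Called once per VM exit: mark cached fields stale. */
static void vmx_cache_invalidate(struct vmx_cached_state *c)
{
    c->rflags_valid = false;
}

/*
 * Later callers (e.g. an interrupt_allowed()-style check) read the VMCS
 * at most once per exit and hit memory afterwards.
 */
static uint64_t vmx_cached_rflags(struct vmx_cached_state *c)
{
    if (!c->rflags_valid) {
        c->rflags = vmcs_readl_stub(GUEST_RFLAGS_FIELD);
        c->rflags_valid = true;
    }
    return c->rflags;
}

int main(void)
{
    struct vmx_cached_state c = { 0 };

    vmx_cache_invalidate(&c);              /* as if a VM exit just happened */
    uint64_t a = vmx_cached_rflags(&c);    /* VMREAD (stubbed) */
    uint64_t b = vmx_cached_rflags(&c);    /* served from memory */
    return (a == b) ? 0 : 1;
}

KVM already uses a similar lazy scheme for guest registers: kvm_rip_read(), seen in the debug printk above, is served from a per-exit register cache rather than a fresh VMCS read on every call.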
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1ccb50c..d32ceac 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -218,6 +218,11 @@ int kvm_pic_read_irq(struct kvm *kvm)
 	struct kvm_pic *s = pic_irqchip(kvm);
 
 	pic_lock(s);
+	if (!s->output) {
+		pic_unlock(s);
+		return -1;
+	}
+	s->output = 0;
 	irq = pic_get_irq(&s->pics[0]);
 	if (irq >= 0) {
 		pic_intack(&s->pics[0], irq);
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 96dfbb6..e93405a 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -78,7 +78,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 	if (vector == -1) {
 		if (kvm_apic_accept_pic_intr(v)) {
 			s = pic_irqchip(v->kvm);
-			s->output = 0;		/* PIC */
 			vector = kvm_pic_read_irq(v->kvm);
 		}
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 44e87a5..a08d96f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3174,10 +3174,11 @@ static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			vcpu->arch.nmi_injected = true;
 			kvm_x86_ops->set_nmi(vcpu);
 		}
-	} else if (kvm_cpu_has_interrupt(vcpu)) {
-		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
-			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
-					    false);
+	} else if (kvm_x86_ops->interrupt_allowed(vcpu)) {
+		int vec = kvm_cpu_get_interrupt(vcpu);
+		if (vec != -1) {
+			printk(KERN_ERR"inject %d rip=%x\n", vec, kvm_rip_read(vcpu));
+			kvm_queue_interrupt(vcpu, vec, false);
 			kvm_x86_ops->set_irq(vcpu);
 		}
 	}
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/x86/kvm/i8259.c |    5 +++++
 arch/x86/kvm/irq.c   |    1 -
 arch/x86/kvm/x86.c   |    9 +++++----
 3 files changed, 10 insertions(+), 5 deletions(-)