Message ID | 1363610842-5878-6-git-send-email-yang.z.zhang@intel.com |
---|---|
State | New, archived |
On Mon, Mar 18, 2013 at 08:47:19PM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@Intel.com>
>
> Update destination vcpu map when ioapic entry or apic(id, ldr, dfr) is changed
>
> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
> ---
>  virt/kvm/ioapic.c |   40 ++++++++++++++++++++++++++++++++++++++--
>  1 files changed, 38 insertions(+), 2 deletions(-)
>
> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
> index 4296116..329efe1 100644
> --- a/virt/kvm/ioapic.c
> +++ b/virt/kvm/ioapic.c
> @@ -87,6 +87,38 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
>  	return result;
>  }
>
> +#ifdef CONFIG_X86
> +static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
> +{
> +	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
> +	struct kvm_lapic_irq irqe;
> +
> +	if (irq != 8 || entry->fields.mask)
> +		return;
> +
> +	spin_lock(&ioapic->lock);

How does this not deadlock? This is called from kvm_scan_ioapic_entry()
and kvm_scan_ioapic_entry() is called from ioapic_write_indirect() with
the lock already taken. You should handle that the same way we handle
eoibitmap recalculation: signal the vcpu and calculate there.

> +	irqe.dest_id = entry->fields.dest_id;
> +	irqe.vector = entry->fields.vector;
> +	irqe.dest_mode = entry->fields.dest_mode;
> +	irqe.trig_mode = entry->fields.trig_mode;
> +	irqe.delivery_mode = entry->fields.delivery_mode << 8;
> +	irqe.level = 1;
> +	irqe.shorthand = 0;
> +
> +	bitmap_zero(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
> +
> +	kvm_get_dest_vcpu(ioapic->kvm, &irqe, ioapic->rtc_status.vcpu_map);
> +	spin_unlock(&ioapic->lock);
> +}
> +
> +#else
> +
> +static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
> +{
> +	return;
> +}
> +#endif
> +
>  static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
>  {
>  	union kvm_ioapic_redirect_entry *pent;
> @@ -147,9 +179,13 @@ void kvm_scan_ioapic_entry(struct kvm *kvm)
>  {
>  	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
>
> -	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
> +	if (!ioapic)
>  		return;
> -	kvm_make_update_eoibitmap_request(kvm);
> +
> +	rtc_irq_get_dest_vcpu(ioapic, 8);
> +
> +	if (kvm_apic_vid_enabled(kvm))
> +		kvm_make_update_eoibitmap_request(kvm);
>  }
>
>  static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
> --
> 1.7.1

--
	Gleb.
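For readers following the review: the eoibitmap handling Gleb points to does not recompute anything under ioapic->lock. The writer raises a request bit on every vcpu via kvm_make_update_eoibitmap_request(), and each vcpu redoes the calculation when it next enters the guest. A minimal sketch of how the RTC destination map could follow the same scheme is below; the KVM_REQ_SCAN_RTC_DEST bit and the helper names are invented for illustration, not taken from this series.

```c
/*
 * Sketch of the "signal the vcpu and calculate there" pattern, for
 * illustration only.  KVM_REQ_SCAN_RTC_DEST and the helper names are
 * assumptions; the existing EOI-bitmap path works the same way through
 * kvm_make_update_eoibitmap_request() / KVM_REQ_EOIBITMAP.
 */

/* Writer side (would live next to kvm_make_update_eoibitmap_request()
 * in virt/kvm/kvm_main.c): raise a request on all vcpus instead of
 * taking ioapic->lock recursively from the register-write path. */
void kvm_make_scan_rtc_dest_request(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_SCAN_RTC_DEST);
}

/* Vcpu side (in vcpu_enter_guest(), next to the other request checks):
 * the recalculation now runs outside the ioapic MMIO write path, so it
 * can take ioapic->lock without deadlocking. */
	if (kvm_check_request(KVM_REQ_SCAN_RTC_DEST, vcpu))
		rtc_irq_recalc_dest_vcpu(vcpu);	/* hypothetical helper */
```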
Gleb Natapov wrote on 2013-03-19:
> On Mon, Mar 18, 2013 at 08:47:19PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zhang@Intel.com>
>>
>> Update destination vcpu map when ioapic entry or apic(id, ldr, dfr) is changed
>>
>> Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
>> ---
>>  virt/kvm/ioapic.c |   40 ++++++++++++++++++++++++++++++++++++++--
>>  1 files changed, 38 insertions(+), 2 deletions(-)
>>
>> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
>> index 4296116..329efe1 100644
>> --- a/virt/kvm/ioapic.c
>> +++ b/virt/kvm/ioapic.c
>> @@ -87,6 +87,38 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
>>  	return result;
>>  }
>>
>> +#ifdef CONFIG_X86
>> +static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
>> +{
>> +	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
>> +	struct kvm_lapic_irq irqe;
>> +
>> +	if (irq != 8 || entry->fields.mask)
>> +		return;
>> +
>> +	spin_lock(&ioapic->lock);
> How does this not deadlock? This is called from kvm_scan_ioapic_entry()
> and kvm_scan_ioapic_entry() is called from ioapic_write_indirect() with

I removed the lock before calling kvm_scan_ioapic_entry() in
ioapic_write_indirect().

> the lock already taken. You should handle that the same way we handle
> eoibitmap recalculation: signal the vcpu and calculate there.

Sure.

>> +	irqe.dest_id = entry->fields.dest_id;
>> +	irqe.vector = entry->fields.vector;
>> +	irqe.dest_mode = entry->fields.dest_mode;
>> +	irqe.trig_mode = entry->fields.trig_mode;
>> +	irqe.delivery_mode = entry->fields.delivery_mode << 8;
>> +	irqe.level = 1;
>> +	irqe.shorthand = 0;
>> +
>> +	bitmap_zero(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
>> +
>> +	kvm_get_dest_vcpu(ioapic->kvm, &irqe, ioapic->rtc_status.vcpu_map);
>> +	spin_unlock(&ioapic->lock);
>> +}
>> +
>> +#else
>> +
>> +static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
>> +{
>> +	return;
>> +}
>> +#endif
>> +
>>  static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
>>  {
>>  	union kvm_ioapic_redirect_entry *pent;
>> @@ -147,9 +179,13 @@ void kvm_scan_ioapic_entry(struct kvm *kvm)
>>  {
>>  	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
>>
>> -	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
>> +	if (!ioapic)
>>  		return;
>> -	kvm_make_update_eoibitmap_request(kvm);
>> +
>> +	rtc_irq_get_dest_vcpu(ioapic, 8);
>> +
>> +	if (kvm_apic_vid_enabled(kvm))
>> +		kvm_make_update_eoibitmap_request(kvm);
>>  }
>>
>>  static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>> --
>> 1.7.1
>
> --
> 	Gleb.

Best regards,
Yang
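The interim fix Yang describes is to make sure ioapic->lock is not held across the rescan. The v2 code is not in this thread, so the following is only a sketch of the ordering his reply implies, assuming the unlock is placed around the kvm_scan_ioapic_entry() call in ioapic_write_indirect():

```c
	/* Sketch of the implied v2 ordering, not actual posted code:
	 * redirtbl[index] has just been updated under ioapic->lock. */
	spin_unlock(&ioapic->lock);
	kvm_scan_ioapic_entry(ioapic->kvm);	/* may now take ioapic->lock itself */
	spin_lock(&ioapic->lock);		/* callers expect the lock held on return */
```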
```diff
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 4296116..329efe1 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -87,6 +87,38 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 	return result;
 }
 
+#ifdef CONFIG_X86
+static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
+{
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+	struct kvm_lapic_irq irqe;
+
+	if (irq != 8 || entry->fields.mask)
+		return;
+
+	spin_lock(&ioapic->lock);
+	irqe.dest_id = entry->fields.dest_id;
+	irqe.vector = entry->fields.vector;
+	irqe.dest_mode = entry->fields.dest_mode;
+	irqe.trig_mode = entry->fields.trig_mode;
+	irqe.delivery_mode = entry->fields.delivery_mode << 8;
+	irqe.level = 1;
+	irqe.shorthand = 0;
+
+	bitmap_zero(ioapic->rtc_status.vcpu_map, KVM_MAX_VCPUS);
+
+	kvm_get_dest_vcpu(ioapic->kvm, &irqe, ioapic->rtc_status.vcpu_map);
+	spin_unlock(&ioapic->lock);
+}
+
+#else
+
+static void rtc_irq_get_dest_vcpu(struct kvm_ioapic *ioapic, int irq)
+{
+	return;
+}
+#endif
+
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 {
 	union kvm_ioapic_redirect_entry *pent;
@@ -147,9 +179,13 @@ void kvm_scan_ioapic_entry(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
-	if (!kvm_apic_vid_enabled(kvm) || !ioapic)
+	if (!ioapic)
 		return;
-	kvm_make_update_eoibitmap_request(kvm);
+
+	rtc_irq_get_dest_vcpu(ioapic, 8);
+
+	if (kvm_apic_vid_enabled(kvm))
+		kvm_make_update_eoibitmap_request(kvm);
 }
 
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
```
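For context on what the new bitmap is for: rtc_status.vcpu_map keeps an up-to-date record of which vcpus RTC irq 8 currently targets, so that later patches in the series can track EOIs from exactly those vcpus. The sketch below shows one plausible consumer; the helper name, the pending counter, and the assumption that the bitmap is indexed by vcpu_id are all illustrative, not taken from this patch.

```c
/*
 * Illustrative consumer of rtc_status.vcpu_map (not part of this patch):
 * when the RTC interrupt is injected, count how many destination vcpus
 * still owe an EOI, so a re-injection could be coalesced or deferred.
 * Indexing the bitmap by vcpu_id is an assumption.
 */
static int rtc_irq_count_pending_eoi(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i, pending = 0;

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (test_bit(vcpu->vcpu_id, ioapic->rtc_status.vcpu_map))
			pending++;	/* this vcpu is a target of irq 8 */
	}
	return pending;		/* a later patch would presumably track this */
}
```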