Message ID | 1502310866-10450-23-git-send-email-tianyu.lan@intel.com (mailing list archive) |
---|---|
State | New, archived |
On Wed, Aug 09, 2017 at 04:34:23PM -0400, Lan Tianyu wrote:
> From: Chao Gao <chao.gao@intel.com>
>
> In two situations, hypervisor delivers a msi to a hvm guest. One is
> when qemu sends a request to hypervisor through XEN_DMOP_inject_msi.
> The other is when a physical interrupt arrives and it has been bound
> to a guest msi.
>
> For the former, the msi is routed to common vIOMMU layer if it is in
> remapping format. For the latter, if the pt irq is bound to a guest
> remapping msi, a new remapping msi is constructed based on the binding
> information and routed to common vIOMMU layer.

After looking at the code below, I'm wondering whether it would make
sense to add a new flag that's HVM_IRQ_DPCI_GUEST_REMAPPED or similar,
so that you would use:

HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_REMAPPED

In order to designate a remapped MSI. It seems like it would avoid
some of the changes below (where you are just adding
HVM_IRQ_DPCI_GUEST_MSI_IR to code paths already used by
HVM_IRQ_DPCI_GUEST_MSI). More of a suggestion rather than a request
for you to change the code.

> Signed-off-by: Chao Gao <chao.gao@intel.com>
> Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
> ---
>  xen/arch/x86/hvm/irq.c       | 11 ++++++++++
>  xen/arch/x86/hvm/vmsi.c      | 14 ++++++++++--
>  xen/drivers/passthrough/io.c | 51 +++++++++++++++++++++++++++++++++-----------
>  xen/include/asm-x86/msi.h    |  3 +++
>  4 files changed, 65 insertions(+), 14 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
> index e425df9..12d83b3 100644
> --- a/xen/arch/x86/hvm/irq.c
> +++ b/xen/arch/x86/hvm/irq.c
> @@ -26,6 +26,7 @@
>  #include <asm/hvm/domain.h>
>  #include <asm/hvm/support.h>
>  #include <asm/msi.h>
> +#include <asm/viommu.h>
>
>  /* Must be called with hvm_domain->irq_lock hold */
>  static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
> @@ -340,6 +341,16 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
>                        >> MSI_DATA_TRIGGER_SHIFT;
>      uint8_t vector = data & MSI_DATA_VECTOR_MASK;
>
> +    if ( addr & MSI_ADDR_INTEFORMAT_MASK )
> +    {
> +        struct irq_remapping_request request;
> +
> +        irq_request_msi_fill(&request, 0, addr, data);
> +        /* Currently, only viommu 0 is supported */
> +        viommu_handle_irq_request(d, 0, &request);
> +        return 0;
> +    }
> +
>      if ( !vector )
>      {
>          int pirq = ((addr >> 32) & 0xffffff00) | dest;
> diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
> index c4ec0ad..75ceb19 100644
> --- a/xen/arch/x86/hvm/vmsi.c
> +++ b/xen/arch/x86/hvm/vmsi.c
> @@ -114,9 +114,19 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
>               "vector=%x trig_mode=%x\n",
>               dest, dest_mode, delivery_mode, vector, trig_mode);
>
> -    ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
> +    ASSERT(pirq_dpci->flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR));

Line break.

Roger.
>>> On 23.08.17 at 12:55, <roger.pau@citrix.com> wrote:
> On Wed, Aug 09, 2017 at 04:34:23PM -0400, Lan Tianyu wrote:
>> From: Chao Gao <chao.gao@intel.com>
>>
>> In two situations, hypervisor delivers a msi to a hvm guest. One is
>> when qemu sends a request to hypervisor through XEN_DMOP_inject_msi.
>> The other is when a physical interrupt arrives and it has been bound
>> to a guest msi.
>>
>> For the former, the msi is routed to common vIOMMU layer if it is in
>> remapping format. For the latter, if the pt irq is bound to a guest
>> remapping msi, a new remapping msi is constructed based on the binding
>> information and routed to common vIOMMU layer.
>
> After looking at the code below, I'm wondering whether it would make
> sense to add a new flag that's HVM_IRQ_DPCI_GUEST_REMAPPED or similar,
> so that you would use:
>
> HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_REMAPPED
>
> In order to designate a remapped MSI. It seems like it would avoid
> some of the changes below (where you are just adding
> HVM_IRQ_DPCI_GUEST_MSI_IR to code paths already used by
> HVM_IRQ_DPCI_GUEST_MSI). More of a suggestion rather than a request
> for you to change the code.

I think this is a pretty good suggestion.

Jan
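As an aside, here is a minimal sketch of the flag scheme suggested in the review above. The bit values and helper names are hypothetical, not taken from Xen's hvm/irq.h; it only illustrates why existing HVM_IRQ_DPCI_GUEST_MSI checks could stay unchanged.

```c
/*
 * Illustration of the review suggestion only; flag values and helper
 * names are hypothetical, not code from the patch or Xen's headers.
 */
#include <stdbool.h>
#include <stdint.h>

#define HVM_IRQ_DPCI_GUEST_MSI       (1u << 4)  /* existing flag, value illustrative */
#define HVM_IRQ_DPCI_GUEST_REMAPPED  (1u << 8)  /* proposed new flag, value illustrative */

/*
 * A remapped guest MSI would carry both bits, so code paths that only
 * ask "is this a guest MSI?" (e.g. pt_irq_need_timer(), hvm_dirq_assist())
 * could keep testing HVM_IRQ_DPCI_GUEST_MSI as they do today.
 */
static bool is_guest_msi(uint32_t flags)
{
    return flags & HVM_IRQ_DPCI_GUEST_MSI;
}

/* Only the delivery path needs to know whether to hand off to the vIOMMU. */
static bool needs_viommu_routing(uint32_t flags)
{
    return flags & HVM_IRQ_DPCI_GUEST_REMAPPED;
}
```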
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e425df9..12d83b3 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -26,6 +26,7 @@
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
 #include <asm/msi.h>
+#include <asm/viommu.h>
 
 /* Must be called with hvm_domain->irq_lock hold */
 static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
@@ -340,6 +341,16 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
                       >> MSI_DATA_TRIGGER_SHIFT;
     uint8_t vector = data & MSI_DATA_VECTOR_MASK;
 
+    if ( addr & MSI_ADDR_INTEFORMAT_MASK )
+    {
+        struct irq_remapping_request request;
+
+        irq_request_msi_fill(&request, 0, addr, data);
+        /* Currently, only viommu 0 is supported */
+        viommu_handle_irq_request(d, 0, &request);
+        return 0;
+    }
+
     if ( !vector )
     {
         int pirq = ((addr >> 32) & 0xffffff00) | dest;
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index c4ec0ad..75ceb19 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -114,9 +114,19 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
              "vector=%x trig_mode=%x\n",
              dest, dest_mode, delivery_mode, vector, trig_mode);
 
-    ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
+    ASSERT(pirq_dpci->flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR));
+    if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
+    {
+        struct irq_remapping_request request;
 
-    vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+        irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+                             pirq_dpci->gmsi.intremap.addr,
+                             pirq_dpci->gmsi.intremap.data);
+        /* Currently, only viommu 0 is supported */
+        viommu_handle_irq_request(d, 0, &request);
+    }
+    else
+        vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
 }
 
 /* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 0510887..3a086d8 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -139,7 +139,9 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
 
 bool_t pt_irq_need_timer(uint32_t flags)
 {
-    return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
+    return !(flags & (HVM_IRQ_DPCI_GUEST_MSI_IR |
+                      HVM_IRQ_DPCI_GUEST_MSI |
+                      HVM_IRQ_DPCI_TRANSLATE));
 }
 
 static int pt_irq_guest_eoi(struct domain *d, struct hvm_pirq_dpci *pirq_dpci,
@@ -738,7 +740,8 @@ int pt_irq_destroy_bind(
     pirq = pirq_info(d, machine_gsi);
     pirq_dpci = pirq_dpci(pirq);
 
-    if ( hvm_irq_dpci && pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
+    if ( hvm_irq_dpci && (pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI) &&
+         (pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI_IR) )
     {
         unsigned int bus = pt_irq_bind->u.pci.bus;
         unsigned int device = pt_irq_bind->u.pci.device;
@@ -909,17 +912,39 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
 {
     int vector = (long)arg;
 
-    if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
-         (pirq_dpci->gmsi.legacy.gvec == vector) )
+    if ( pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI )
     {
-        int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
-        int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+        if ( (pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI) &&
+             (pirq_dpci->gmsi.legacy.gvec == vector) )
+        {
+            int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+            int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
 
-        if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
-                               dest_mode) )
+            if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
+                                   dest_mode) )
+            {
+                __msi_pirq_eoi(pirq_dpci);
+                return 1;
+            }
+        }
+        else if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
         {
-            __msi_pirq_eoi(pirq_dpci);
-            return 1;
+            int ret;
+            struct irq_remapping_request request;
+            struct irq_remapping_info irq_info;
+
+            irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+                                 pirq_dpci->gmsi.intremap.addr,
+                                 pirq_dpci->gmsi.intremap.data);
+            /* Currently, only viommu 0 is supported */
+            ret = viommu_get_irq_info(d, 0, &request, &irq_info);
+            if ( (!ret) && (irq_info.vector == vector) &&
+                 vlapic_match_dest(vcpu_vlapic(current), NULL, 0,
+                                   irq_info.dest, irq_info.dest_mode) )
+            {
+                __msi_pirq_eoi(pirq_dpci);
+                return 1;
+            }
         }
     }
 
@@ -954,14 +979,16 @@ static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
     {
         send_guest_pirq(d, pirq);
 
-        if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+        if ( pirq_dpci->flags &
+             (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR) )
         {
             spin_unlock(&d->event_lock);
             return;
         }
     }
 
-    if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+    if ( pirq_dpci->flags &
+         (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_GUEST_MSI_IR) )
     {
         vmsi_deliver_pirq(d, pirq_dpci);
         spin_unlock(&d->event_lock);
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index 37d37b8..5e94d07 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -49,6 +49,9 @@
 #define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
 #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
 
+#define MSI_ADDR_INTEFORMAT_SHIFT   4
+#define MSI_ADDR_INTEFORMAT_MASK    (1 << MSI_ADDR_INTEFORMAT_SHIFT)
+
 #define MSI_ADDR_DEST_ID_SHIFT      12
 #define MSI_ADDR_DEST_ID_MASK       0x00ff000
 #define MSI_ADDR_DEST_ID(dest)      (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
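For reference, a minimal standalone sketch of what the new MSI_ADDR_INTEFORMAT_* definitions encode: on VT-d, bit 4 of the MSI address is the interrupt format bit (1 = remappable format, 0 = compatibility format), and hvm_inject_msi() uses this test to decide whether to hand the MSI to the vIOMMU layer. The helper name below is hypothetical.

```c
/*
 * Illustration only, not part of the patch: decoding the interrupt
 * format bit described by the MSI_ADDR_INTEFORMAT_* definitions above.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSI_ADDR_INTEFORMAT_SHIFT 4
#define MSI_ADDR_INTEFORMAT_MASK  (1 << MSI_ADDR_INTEFORMAT_SHIFT)

static bool msi_addr_is_remappable(uint64_t addr)
{
    /* Bit 4 set means the guest composed the MSI in remappable format,
       so it must be translated by the (v)IOMMU before delivery. */
    return addr & MSI_ADDR_INTEFORMAT_MASK;
}
```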