@@ -1720,6 +1720,15 @@ int xc_domain_ioport_mapping(xc_interface *xch,
uint32_t nr_ports,
uint32_t add_mapping);
+int xc_domain_update_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr,
+ uint64_t gtable);
+
int xc_domain_update_msi_irq(
xc_interface *xch,
uint32_t domid,
@@ -1734,6 +1743,14 @@ int xc_domain_unbind_msi_irq(xc_interface *xch,
uint32_t pirq,
uint32_t gflags);
+int xc_domain_unbind_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr);
+
int xc_domain_bind_pt_irq(xc_interface *xch,
uint32_t domid,
uint8_t machine_irq,
@@ -1702,8 +1702,34 @@ int xc_deassign_dt_device(
return rc;
}
+int xc_domain_update_msi_irq_remapping(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t pirq,       /* machine pirq to bind */
+    uint32_t source_id,  /* interrupt source id as seen by the vIOMMU */
+    uint32_t data,       /* guest-programmed MSI data register value */
+    uint64_t addr,       /* guest-programmed MSI address register value */
+    uint64_t gtable)     /* presumably guest MSI-X table address, 0 = none - TODO confirm */
+{
+    int rc;
+    xen_domctl_bind_pt_irq_t *bind;
+
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_bind_pt_irq; /* bind as a remappable guest MSI */
+    domctl.domain = (domid_t)domid;
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+    bind->machine_irq = pirq;
+    bind->u.msi_ir.source_id = source_id;
+    bind->u.msi_ir.data = data;
+    bind->u.msi_ir.addr = addr;
+    bind->u.msi_ir.gtable = gtable;
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
int xc_domain_update_msi_irq(
xc_interface *xch,
@@ -1732,6 +1758,33 @@ int xc_domain_update_msi_irq(
return rc;
}
+int xc_domain_unbind_msi_irq_remapping(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t pirq,       /* machine pirq to unbind */
+    uint32_t source_id,  /* interrupt source id as seen by the vIOMMU */
+    uint32_t data,       /* guest-programmed MSI data register value */
+    uint64_t addr)       /* guest-programmed MSI address register value */
+{
+    int rc;
+    xen_domctl_bind_pt_irq_t *bind;
+
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_unbind_pt_irq; /* tear down a remappable MSI bind */
+    domctl.domain = (domid_t)domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+    bind->machine_irq = pirq;
+    bind->u.msi_ir.source_id = source_id;
+    bind->u.msi_ir.data = data;
+    bind->u.msi_ir.addr = addr;
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
+
int xc_domain_unbind_msi_irq(
xc_interface *xch,
uint32_t domid,
@@ -21,9 +21,11 @@
#include <xen/iommu.h>
#include <xen/cpu.h>
#include <xen/irq.h>
+#include <xen/viommu.h>
#include <asm/hvm/irq.h>
#include <asm/hvm/support.h>
#include <asm/io_apic.h>
+#include <asm/viommu.h>
static DEFINE_PER_CPU(struct list_head, dpci_list);
@@ -275,6 +277,106 @@ static struct vcpu *vector_hashing_dest(const struct domain *d,
return dest;
}
+static void set_hvm_gmsi_info(struct hvm_gmsi_info *msi,
+                              xen_domctl_bind_pt_irq_t *pt_irq_bind)
+{
+    switch (pt_irq_bind->irq_type)
+    {
+    case PT_IRQ_TYPE_MSI:
+        msi->legacy.gvec = pt_irq_bind->u.msi.gvec;
+        /* Cache routing bits only - strip the "unmasked" hint. */
+        msi->legacy.gflags = pt_irq_bind->u.msi.gflags &
+                             ~XEN_DOMCTL_VMSI_X86_UNMASKED;
+        break;
+
+    case PT_IRQ_TYPE_MSI_IR:
+        msi->intremap.source_id = pt_irq_bind->u.msi_ir.source_id;
+        msi->intremap.data = pt_irq_bind->u.msi_ir.data;
+        msi->intremap.addr = pt_irq_bind->u.msi_ir.addr;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+    }
+}
+
+static void clear_hvm_gmsi_info(struct hvm_gmsi_info *msi, int irq_type)
+{
+    switch (irq_type)
+    {
+    case PT_IRQ_TYPE_MSI:
+        msi->legacy.gvec = 0;
+        msi->legacy.gflags = 0;
+        break;
+
+    case PT_IRQ_TYPE_MSI_IR:
+        msi->intremap.source_id = 0;
+        msi->intremap.data = 0;
+        msi->intremap.addr = 0;
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
+    }
+}
+
+static bool hvm_gmsi_info_need_update(struct hvm_gmsi_info *msi,
+                                      xen_domctl_bind_pt_irq_t *pt_irq_bind)
+{
+    switch (pt_irq_bind->irq_type)
+    {
+    case PT_IRQ_TYPE_MSI:
+        return (msi->legacy.gvec != pt_irq_bind->u.msi.gvec) ||
+               (msi->legacy.gflags != (pt_irq_bind->u.msi.gflags &
+                                       ~XEN_DOMCTL_VMSI_X86_UNMASKED));
+    case PT_IRQ_TYPE_MSI_IR:
+        return (msi->intremap.source_id != pt_irq_bind->u.msi_ir.source_id) ||
+               (msi->intremap.data != pt_irq_bind->u.msi_ir.data) ||
+               (msi->intremap.addr != pt_irq_bind->u.msi_ir.addr);
+    default:
+        ASSERT_UNREACHABLE();
+    }
+    return false;
+}
+
+static int pirq_dpci_2_msi_attr(struct domain *d,
+                                struct hvm_pirq_dpci *pirq_dpci, uint8_t *gvec,
+                                uint32_t *dest, bool *dm, uint8_t *dlm)
+{
+    int rc = 0;
+
+    if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_REMAPPED )
+    {
+        struct arch_irq_remapping_request request;
+        struct arch_irq_remapping_info irq_info;
+        /* Translate the guest-remapped MSI through the vIOMMU. */
+        irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+                             pirq_dpci->gmsi.intremap.addr,
+                             pirq_dpci->gmsi.intremap.data);
+        rc = viommu_get_irq_info(d, &request, &irq_info);
+        if ( rc )
+            return rc;
+        *gvec = irq_info.vector;
+        *dest = irq_info.dest;
+        *dm = irq_info.dest_mode;
+        *dlm = irq_info.delivery_mode;
+    }
+    else if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+    {
+        *gvec = pirq_dpci->gmsi.legacy.gvec;
+        *dest = MASK_EXTR(pirq_dpci->gmsi.legacy.gflags,
+                          XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
+        *dm = pirq_dpci->gmsi.legacy.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
+        *dlm = MASK_EXTR(pirq_dpci->gmsi.legacy.gflags,
+                         XEN_DOMCTL_VMSI_X86_DELIV_MASK);
+    }
+    else
+    {
+        ASSERT_UNREACHABLE();
+        rc = -EINVAL; /* don't hand back uninitialized attributes */
+    }
+    return rc;
+}
+
int pt_irq_create_bind(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
@@ -338,20 +440,24 @@ int pt_irq_create_bind(
switch ( pt_irq_bind->irq_type )
{
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
{
- uint8_t dest, delivery_mode;
+ uint8_t delivery_mode, gvec;
+ uint32_t dest;
bool dest_mode;
int dest_vcpu_id;
const struct vcpu *vcpu;
- uint32_t gflags = pt_irq_bind->u.msi.gflags &
- ~XEN_DOMCTL_VMSI_X86_UNMASKED;
+ bool ir = (pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_IR);
+ uint64_t gtable = ir ? pt_irq_bind->u.msi_ir.gtable :
+ pt_irq_bind->u.msi.gtable;
if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
{
pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
HVM_IRQ_DPCI_GUEST_MSI;
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = gflags;
+ if ( ir )
+ pirq_dpci->flags |= HVM_IRQ_DPCI_GUEST_REMAPPED;
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind);
/*
* 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
* The 'pirq_cleanup_check' which would free the structure is only
@@ -366,9 +472,9 @@ int pt_irq_create_bind(
pirq_dpci->dom = d;
/* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
rc = pirq_guest_bind(d->vcpu[0], info, 0);
- if ( rc == 0 && pt_irq_bind->u.msi.gtable )
+ if ( rc == 0 && gtable )
{
- rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
+ rc = msixtbl_pt_register(d, info, gtable);
if ( unlikely(rc) )
{
pirq_guest_unbind(d, info);
@@ -383,8 +489,7 @@ int pt_irq_create_bind(
}
if ( unlikely(rc) )
{
- pirq_dpci->gmsi.legacy.gflags = 0;
- pirq_dpci->gmsi.legacy.gvec = 0;
+ clear_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind->irq_type);
pirq_dpci->dom = NULL;
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
@@ -396,6 +501,9 @@ int pt_irq_create_bind(
{
uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
+        /* Don't let a rebind silently switch between remapped and legacy. */
+        if ( ir != !!(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_REMAPPED) )
+            mask = ~pirq_dpci->flags; /* force the -EBUSY check below */
if ( (pirq_dpci->flags & mask) != mask )
{
spin_unlock(&d->event_lock);
@@ -403,31 +511,30 @@ int pt_irq_create_bind(
}
/* If pirq is already mapped as vmsi, update guest data/addr. */
- if ( pirq_dpci->gmsi.legacy.gvec != pt_irq_bind->u.msi.gvec ||
- pirq_dpci->gmsi.legacy.gflags != gflags )
+ if ( hvm_gmsi_info_need_update(&pirq_dpci->gmsi, pt_irq_bind) )
{
/* Directly clear pending EOIs before enabling new MSI info. */
pirq_guest_eoi(info);
- }
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = gflags;
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind);
}
}
/* Calculate dest_vcpu_id for MSI-type pirq migration. */
- dest = MASK_EXTR(pirq_dpci->gmsi.legacy.gflags,
- XEN_DOMCTL_VMSI_X86_DEST_ID_MASK);
- dest_mode = pirq_dpci->gmsi.legacy.gflags & XEN_DOMCTL_VMSI_X86_DM_MASK;
- delivery_mode = MASK_EXTR(pirq_dpci->gmsi.legacy.gflags,
- XEN_DOMCTL_VMSI_X86_DELIV_MASK);
-
+ rc = pirq_dpci_2_msi_attr(d, pirq_dpci, &gvec, &dest, &dest_mode,
+ &delivery_mode);
+ if ( unlikely(rc) )
+ {
+ spin_unlock(&d->event_lock);
+ return rc;
+ }
dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
spin_unlock(&d->event_lock);
pirq_dpci->gmsi.posted = false;
vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
- if ( iommu_intpost )
+ /* FIXME: won't use interrupt posting for guest's remapping MSIs */
+ if ( iommu_intpost && !ir )
{
if ( delivery_mode == dest_LowestPrio )
vcpu = vector_hashing_dest(d, dest, dest_mode,
@@ -439,7 +546,7 @@ int pt_irq_create_bind(
hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
/* Use interrupt posting if it is supported. */
- if ( iommu_intpost )
+ if ( iommu_intpost && !ir )
pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
info, pirq_dpci->gmsi.legacy.gvec);
@@ -646,6 +753,7 @@ int pt_irq_destroy_bind(
}
break;
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
break;
default:
return -EOPNOTSUPP;
@@ -664,7 +772,8 @@ int pt_irq_destroy_bind(
pirq = pirq_info(d, machine_gsi);
pirq_dpci = pirq_dpci(pirq);
- if ( hvm_irq_dpci && pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
+ if ( hvm_irq_dpci && pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI &&
+ pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI_IR )
{
unsigned int bus = pt_irq_bind->u.pci.bus;
unsigned int device = pt_irq_bind->u.pci.device;
@@ -121,6 +121,7 @@ struct dev_intx_gsi_link {
#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
#define _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT 6
+#define _HVM_IRQ_DPCI_GUEST_REMAPPED_SHIFT 7
#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
#define HVM_IRQ_DPCI_MACH_PCI (1u << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
#define HVM_IRQ_DPCI_MACH_MSI (1u << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
@@ -128,6 +129,7 @@ struct dev_intx_gsi_link {
#define HVM_IRQ_DPCI_EOI_LATCH (1u << _HVM_IRQ_DPCI_EOI_LATCH_SHIFT)
#define HVM_IRQ_DPCI_GUEST_PCI (1u << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_MSI (1u << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_REMAPPED (1u << _HVM_IRQ_DPCI_GUEST_REMAPPED_SHIFT)
#define HVM_IRQ_DPCI_IDENTITY_GSI (1u << _HVM_IRQ_DPCI_IDENTITY_GSI_SHIFT)
#define HVM_IRQ_DPCI_TRANSLATE (1u << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
@@ -137,6 +139,11 @@ struct hvm_gmsi_info {
uint32_t gvec;
uint32_t gflags;
} legacy;
+ struct {
+ uint32_t source_id;
+ uint32_t data;
+ uint64_t addr;
+ } intremap;
};
int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
bool posted; /* directly deliver to guest via VT-d PI? */
@@ -559,6 +559,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
+ PT_IRQ_TYPE_MSI_IR,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -586,6 +587,12 @@ struct xen_domctl_bind_pt_irq {
uint64_aligned_t gtable;
} msi;
struct {
+            uint32_t source_id;
+            uint32_t data;
+            uint64_aligned_t addr;   /* match msi.gtable: stable 32/64-bit ABI */
+            uint64_aligned_t gtable;
+        } msi_ir;
+        struct {
uint16_t spi;
} spi;
} u;