@@ -1382,6 +1382,11 @@ int xc_domain_irq_permission(xc_interface *xch,
uint32_t pirq,
bool allow_access);
+int xc_domain_gsi_permission(xc_interface *xch,
+ uint32_t domid,
+ uint32_t gsi,
+ bool allow_access);
+
int xc_domain_iomem_permission(xc_interface *xch,
uint32_t domid,
unsigned long first_mfn,
@@ -1394,6 +1394,21 @@ int xc_domain_irq_permission(xc_interface *xch,
return do_domctl(xch, &domctl);
}
+int xc_domain_gsi_permission(xc_interface *xch,
+ uint32_t domid,
+ uint32_t gsi,
+ bool allow_access)
+{
+ struct xen_domctl domctl = {
+ .cmd = XEN_DOMCTL_gsi_permission,
+ .domain = domid,
+ .u.gsi_permission.gsi = gsi,
+ .u.gsi_permission.allow_access = allow_access,
+ };
+
+ return do_domctl(xch, &domctl);
+}
+
int xc_domain_iomem_permission(xc_interface *xch,
uint32_t domid,
unsigned long first_mfn,
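
As an illustration only (not part of the patch), a caller linked against the rebuilt libxenctrl could exercise the new wrapper roughly as follows; the domid and GSI values are placeholders:

#include <stdbool.h>
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1, gsi = 28;          /* placeholder values */
    int rc;

    if (!xch)
        return 1;

    /* Allow the guest to receive interrupts for this GSI ... */
    rc = xc_domain_gsi_permission(xch, domid, gsi, true);
    if (rc < 0)
        fprintf(stderr, "grant failed (%d)\n", rc);

    /* ... and revoke the permission again. */
    rc = xc_domain_gsi_permission(xch, domid, gsi, false);
    if (rc < 0)
        fprintf(stderr, "revoke failed (%d)\n", rc);

    xc_interface_close(xch);
    return rc < 0;
}
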
@@ -91,6 +91,10 @@ void libxl__arch_update_domain_config(libxl__gc *gc,
libxl_domain_config *dst,
const libxl_domain_config *src);
+_hidden
+int libxl__arch_hvm_map_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid);
+_hidden
+int libxl__arch_hvm_unmap_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid);
#if defined(__i386__) || defined(__x86_64__)
#define LAPIC_BASE_ADDRESS 0xfee00000
@@ -1774,6 +1774,16 @@ void libxl__arch_update_domain_config(libxl__gc *gc,
{
}
+int libxl__arch_hvm_map_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid)
+{
+ return -1;
+}
+
+int libxl__arch_hvm_unmap_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid)
+{
+ return -1;
+}
+
/*
* Local variables:
* mode: C
@@ -17,6 +17,7 @@
#include "libxl_osdeps.h" /* must come before any other headers */
#include "libxl_internal.h"
+#include "libxl_arch.h"
#define PCI_BDF "%04x:%02x:%02x.%01x"
#define PCI_BDF_SHORT "%02x:%02x.%01x"
@@ -1478,6 +1479,16 @@ static void pci_add_dm_done(libxl__egc *egc,
fclose(f);
if (!pci_supp_legacy_irq())
goto out_no_irq;
+
+ /*
+ * When dom0 is PVH and an x86 GSI is mapped to a pirq for the domU,
+ * the GSI (rather than the IRQ read from sysfs below) must be used
+ * to grant interrupt permission.
+ */
+ if (!libxl__arch_hvm_map_gsi(gc, pci_encode_bdf(pci), domid))
+ goto pci_permissive;
+ else
+ LOGED(WARN, domid, "libxl__arch_hvm_map_gsi failed (err=%d)", errno);
+
sysfs_path = GCSPRINTF(SYSFS_PCI_DEV"/"PCI_BDF"/irq", pci->domain,
pci->bus, pci->dev, pci->func);
f = fopen(sysfs_path, "r");
@@ -1505,6 +1516,7 @@ static void pci_add_dm_done(libxl__egc *egc,
}
fclose(f);
+pci_permissive:
/* Don't restrict writes to the PCI config space from this VM */
if (pci->permissive) {
if ( sysfs_write_bdf(gc, SYSFS_PCIBACK_DRIVER"/permissive",
@@ -2229,6 +2241,11 @@ skip_bar:
if (!pci_supp_legacy_irq())
goto skip_legacy_irq;
+ if (!libxl__arch_hvm_unmap_gsi(gc, pci_encode_bdf(pci), domid))
+ goto skip_legacy_irq;
+ else
+ LOGED(WARN, domid, "libxl__arch_hvm_unmap_gsi failed (err=%d)", errno);
+
sysfs_path = GCSPRINTF(SYSFS_PCI_DEV"/"PCI_BDF"/irq", pci->domain,
pci->bus, pci->dev, pci->func);
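
The hunks above make libxl try the GSI-based path first and fall back to the legacy sysfs-IRQ path only when it fails. Below is a simplified, hypothetical sketch of that fallback pattern using only the public libxc calls; the helper name and the assumption that the caller already knows both the GSI and the sysfs IRQ are illustrative, not part of the patch:

#include <stdbool.h>
#include <xenctrl.h>

/* Hypothetical helper: grant @domid access to a device's legacy interrupt. */
static int grant_legacy_intr(xc_interface *xch, uint32_t domid,
                             uint32_t gsi, uint32_t sysfs_irq)
{
    /* Try the new GSI-based call first ... */
    int rc = xc_domain_gsi_permission(xch, domid, gsi, true);

    if (rc < 0)
        /* ... and fall back to the pre-existing IRQ-based call. */
        rc = xc_domain_irq_permission(xch, domid, sysfs_irq, true);

    return rc;
}
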
@@ -879,6 +879,117 @@ void libxl__arch_update_domain_config(libxl__gc *gc,
libxl_defbool_val(src->b_info.u.hvm.pirq));
}
+struct pcidev_map_pirq {
+ uint32_t sbdf;
+ uint32_t pirq;
+ XEN_LIST_ENTRY(struct pcidev_map_pirq) entry;
+};
+
+static pthread_mutex_t pcidev_pirq_mutex = PTHREAD_MUTEX_INITIALIZER;
+static XEN_LIST_HEAD(, struct pcidev_map_pirq) pcidev_pirq_list =
+ XEN_LIST_HEAD_INITIALIZER(pcidev_pirq_list);
+
+int libxl__arch_hvm_map_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid)
+{
+ int pirq = -1, gsi, r;
+ xc_domaininfo_t info;
+ struct pcidev_map_pirq *pcidev_pirq;
+ libxl_ctx *ctx = libxl__gc_owner(gc);
+
+ r = xc_domain_getinfo_single(ctx->xch, LIBXL_TOOLSTACK_DOMID, &info);
+ if (r < 0) {
+ LOGED(ERROR, domid, "getdomaininfo failed (error=%d)", errno);
+ return r;
+ }
+ if ((info.flags & XEN_DOMINF_hvm_guest) &&
+ !(info.arch_config.emulation_flags & XEN_X86_EMU_USE_PIRQ)) {
+ gsi = xc_physdev_gsi_from_pcidev(ctx->xch, sbdf);
+ if (gsi < 0) {
+ return ERROR_FAIL;
+ }
+ r = xc_physdev_map_pirq(ctx->xch, domid, gsi, &pirq);
+ if (r < 0) {
+ LOGED(ERROR, domid, "xc_physdev_map_pirq gsi=%d (error=%d)",
+ gsi, errno);
+ return r;
+ }
+ r = xc_domain_gsi_permission(ctx->xch, domid, gsi, 1);
+ if (r < 0) {
+ LOGED(ERROR, domid, "xc_domain_gsi_permission gsi=%d (error=%d)",
+ gsi, errno);
+ return r;
+ }
+ } else {
+ return ERROR_FAIL;
+ }
+
+ /* Record the pirq so it can be looked up again when the device is unmapped. */
+ pcidev_pirq = malloc(sizeof(struct pcidev_map_pirq));
+ if (!pcidev_pirq) {
+ LOGED(ERROR, domid, "no memory for saving pirq of pcidev info");
+ return ERROR_NOMEM;
+ }
+ pcidev_pirq->sbdf = sbdf;
+ pcidev_pirq->pirq = pirq;
+
+ r = pthread_mutex_lock(&pcidev_pirq_mutex);
+ assert(!r);
+ XEN_LIST_INSERT_HEAD(&pcidev_pirq_list, pcidev_pirq, entry);
+ r = pthread_mutex_unlock(&pcidev_pirq_mutex);
+ assert(!r);
+
+ return 0;
+}
+
+int libxl__arch_hvm_unmap_gsi(libxl__gc *gc, uint32_t sbdf, uint32_t domid)
+{
+ int pirq = -1, gsi, r;
+ xc_domaininfo_t info;
+ struct pcidev_map_pirq *pcidev_pirq;
+ libxl_ctx *ctx = libxl__gc_owner(gc);
+
+ r = xc_domain_getinfo_single(ctx->xch, LIBXL_TOOLSTACK_DOMID, &info);
+ if (r < 0) {
+ LOGED(ERROR, domid, "getdomaininfo failed (error=%d)", errno);
+ return r;
+ }
+ if ((info.flags & XEN_DOMINF_hvm_guest) &&
+ !(info.arch_config.emulation_flags & XEN_X86_EMU_USE_PIRQ)) {
+ gsi = xc_physdev_gsi_from_pcidev(ctx->xch, sbdf);
+ if (gsi < 0) {
+ return ERROR_FAIL;
+ }
+ r = pthread_mutex_lock(&pcidev_pirq_mutex);
+ assert(!r);
+ XEN_LIST_FOREACH(pcidev_pirq, &pcidev_pirq_list, entry) {
+ if (pcidev_pirq->sbdf == sbdf) {
+ pirq = pcidev_pirq->pirq;
+ XEN_LIST_REMOVE(pcidev_pirq, entry);
+ free(pcidev_pirq);
+ break;
+ }
+ }
+ r = pthread_mutex_unlock(&pcidev_pirq_mutex);
+ assert(!r);
+ if (pirq < 0) {
+ /* No pirq recorded: already unmapped or never mapped here, nothing to do. */
+ return 0;
+ }
+ r = xc_physdev_unmap_pirq(ctx->xch, domid, pirq);
+ if (r < 0) {
+ LOGED(ERROR, domid, "xc_physdev_unmap_pirq pirq=%d (error=%d)",
+ pirq, errno);
+ return r;
+ }
+ r = xc_domain_gsi_permission(ctx->xch, domid, gsi, 0);
+ if (r < 0) {
+ LOGED(ERROR, domid, "xc_domain_gsi_permission gsi=%d (error=%d)",
+ gsi, errno);
+ return r;
+ }
+ } else {
+ return ERROR_FAIL;
+ }
+
+ return 0;
+}
+
/*
* Local variables:
* mode: C
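
For reference, the dom0-type check that gates both new x86 helpers can be exercised on its own. This is an illustrative standalone snippet (the helper name is made up), assuming a Xen recent enough to provide xc_domain_getinfo_single() and to report arch_config in the domain info, as the hunk above already relies on:

#include <stdio.h>
#include <xenctrl.h>

/*
 * Returns 1 if dom0 is an HVM/PVH domain without PIRQ emulation, i.e. the
 * case in which the GSI-based mapping above is used; 0 if the legacy IRQ
 * path applies; -1 on error.
 */
static int dom0_uses_gsi_path(xc_interface *xch)
{
    xc_domaininfo_t info;

    if (xc_domain_getinfo_single(xch, 0, &info) < 0) {
        perror("xc_domain_getinfo_single");
        return -1;
    }

    return (info.flags & XEN_DOMINF_hvm_guest) &&
           !(info.arch_config.emulation_flags & XEN_X86_EMU_USE_PIRQ);
}
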