@@ -312,6 +312,8 @@ config INTEL_IDXD_MDEV
depends on VFIO_MDEV
depends on VFIO_MDEV_DEVICE
select PCI_SIOV
+ select IRQ_BYPASS_MANAGER
+ select IMS_MSI_ARRAY
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
@@ -213,6 +213,7 @@ struct idxd_device {
struct workqueue_struct *wq;
struct work_struct work;
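+ /* irq domain covering the device's IMS (Interrupt Message Store) slot array */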
+ struct irq_domain *ims_domain;
int *int_handles;
};
@@ -508,8 +508,12 @@ static int msix_trigger_unregister(struct vdcm_idxd *vidxd, int index)
dev_dbg(dev, "disable MSIX trigger %d\n", index);
if (index) {
+ struct irq_bypass_producer *producer;
u32 auxval;
+ producer = &vidxd->vdev.producer[index - 1];
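+ /* disconnect any bypass consumer (e.g. KVM) before the backing vector is freed */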
+ irq_bypass_unregister_producer(producer);
+
irq_entry = &vidxd->irq_entries[index - 1];
if (irq_entry->irq_set) {
free_irq(irq_entry->entry->irq, irq_entry);
@@ -553,9 +557,11 @@ static int msix_trigger_register(struct vdcm_idxd *vidxd, u32 fd, int index)
* in i - 1 to the host setup and irq_entries.
*/
if (index) {
+ struct irq_bypass_producer *producer;
int pasid;
u32 auxval;
+ producer = &vidxd->vdev.producer[index - 1];
irq_entry = &vidxd->irq_entries[index - 1];
pasid = idxd_mdev_get_pasid(mdev);
if (pasid < 0)
@@ -581,6 +587,14 @@ static int msix_trigger_register(struct vdcm_idxd *vidxd, u32 fd, int index)
irq_set_auxdata(irq_entry->entry->irq, IMS_AUXDATA_CONTROL_WORD, auxval);
return rc;
}
+
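+ /* hand the IMS vector to the irq bypass manager so a consumer such as
+ * KVM can couple to it; failure is non-fatal and interrupt delivery
+ * falls back to the normal eventfd signaling path */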
+ producer->token = trigger;
+ producer->irq = irq_entry->entry->irq;
+ rc = irq_bypass_register_producer(producer);
+ if (unlikely(rc))
+ dev_info(dev, "irq bypass producer (token %p) registration failed: %d\n",
+ producer->token, rc);
+
irq_entry->irq_set = true;
}
@@ -934,6 +948,7 @@ static const struct mdev_parent_ops idxd_vdcm_ops = {
int idxd_mdev_host_init(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ struct ims_array_info ims_info;
int rc;
if (!test_bit(IDXD_FLAG_SIOV_SUPPORTED, &idxd->flags))
@@ -950,6 +965,15 @@ int idxd_mdev_host_init(struct idxd_device *idxd)
return -EOPNOTSUPP;
}
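+ /* create the per-device IMS irq domain on top of the slot array in device MMIO */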
+ ims_info.max_slots = idxd->ims_size;
+ ims_info.slots = idxd->reg_base + idxd->ims_offset;
+ idxd->ims_domain = pci_ims_array_create_msi_irq_domain(idxd->pdev, &ims_info);
+ if (!idxd->ims_domain) {
+ dev_warn(dev, "Fail to acquire IMS domain\n");
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
+ return -ENODEV;
+ }
+
return mdev_register_device(dev, &idxd_vdcm_ops);
}
@@ -958,6 +982,8 @@ void idxd_mdev_host_release(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
int rc;
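+ /* tear down the IMS irq domain created in idxd_mdev_host_init() */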
+ irq_domain_remove(idxd->ims_domain);
+
mdev_unregister_device(dev);
if (iommu_dev_has_feature(dev, IOMMU_DEV_FEAT_AUX)) {
rc = iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
@@ -43,6 +43,7 @@ struct ims_irq_entry {
struct idxd_vdev {
struct mdev_device *mdev;
struct eventfd_ctx *msix_trigger[VIDXD_MAX_MSIX_ENTRIES];
+ struct irq_bypass_producer producer[VIDXD_MAX_MSIX_ENTRIES];
};
struct vdcm_idxd {
@@ -16,6 +16,7 @@
#include <linux/intel-svm.h>
#include <linux/kvm_host.h>
#include <linux/eventfd.h>
+#include <linux/irqchip/irq-ims-msi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
@@ -844,6 +845,51 @@ static void vidxd_wq_disable(struct vdcm_idxd *vidxd, int wq_id_mask)
idxd_complete_command(vidxd, IDXD_CMDSTS_SUCCESS);
}
+void vidxd_free_ims_entries(struct vdcm_idxd *vidxd)
+{
+ struct irq_domain *irq_domain;
+ struct mdev_device *mdev = vidxd->vdev.mdev;
+ struct device *dev = mdev_dev(mdev);
+ int i;
+
+ for (i = 0; i < VIDXD_MAX_MSIX_VECS; i++)
+ vidxd->irq_entries[i].entry = NULL;
+
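+ /* return the IMS vectors this mdev allocated from the parent device's domain */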
+ irq_domain = dev_get_msi_domain(dev);
+ if (irq_domain)
+ msi_domain_free_irqs(irq_domain, dev);
+ else
+ dev_warn(dev, "No IMS irq domain.\n");
+}
+
+int vidxd_setup_ims_entries(struct vdcm_idxd *vidxd)
+{
+ struct irq_domain *irq_domain;
+ struct idxd_device *idxd = vidxd->idxd;
+ struct mdev_device *mdev = vidxd->vdev.mdev;
+ struct device *dev = mdev_dev(mdev);
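+ /* guest MSI-X vector 0 is emulated; only vectors 1..N-1 need backing IMS entries */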
+ int vecs = VIDXD_MAX_MSIX_VECS - 1;
+ struct msi_desc *entry;
+ struct ims_irq_entry *irq_entry;
+ int rc, i = 0;
+
+ irq_domain = idxd->ims_domain;
+ dev_set_msi_domain(dev, irq_domain);
+ rc = msi_domain_alloc_irqs(irq_domain, dev, vecs);
+ if (rc < 0)
+ return rc;
+
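+ /* cache each allocated msi_desc so guest vector i + 1 can be tied to host entry i */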
+ for_each_msi_entry(entry, dev) {
+ irq_entry = &vidxd->irq_entries[i];
+ irq_entry->vidxd = vidxd;
+ irq_entry->entry = entry;
+ irq_entry->id = i;
+ i++;
+ }
+
+ return 0;
+}
+
void vidxd_do_command(struct vdcm_idxd *vidxd, u32 val)
{
union idxd_command_reg *reg = (union idxd_command_reg *)(vidxd->bar0 + IDXD_CMD_OFFSET);
@@ -896,14 +942,3 @@ void vidxd_do_command(struct vdcm_idxd *vidxd, u32 val)
break;
}
}
-
-int vidxd_setup_ims_entries(struct vdcm_idxd *vidxd)
-{
- /* PLACEHOLDER */
- return 0;
-}
-
-void vidxd_free_ims_entries(struct vdcm_idxd *vidxd)
-{
- /* PLACEHOLDER */
-}
@@ -536,6 +536,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
return ops->domain_alloc_irqs(domain, dev, nvec);
}
+EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
@@ -572,6 +573,7 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
return ops->domain_free_irqs(domain, dev);
}
+EXPORT_SYMBOL_GPL(msi_domain_free_irqs);
/**
* msi_get_domain_info - Get the MSI interrupt domain info for @domain