
[RFC,V3,13/26] vfio/pci: Provide interrupt context to vfio_msi_enable() and vfio_msi_disable()

Message ID: e94051e56d1484ed3398ba72b0ede89fbc8cf2c0.1698422237.git.reinette.chatre@intel.com (mailing list archive)
State: New, archived
Series: vfio/pci: Back guest interrupts from Interrupt Message Store (IMS)

Commit Message

Reinette Chatre Oct. 27, 2023, 5 p.m. UTC
vfio_msi_enable() and vfio_msi_disable() perform the PCI-specific
operations to allocate and free the interrupts on the device that back
the guest interrupts. This makes them backend-specific calls that
should be invoked by the interrupt management frontend.

Pass the interrupt context as a parameter to vfio_msi_enable() and
vfio_msi_disable() so that they can be called by a generic frontend,
and so that other backends can provide their own vfio_msi_enable()
and vfio_msi_disable().

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
Changes since RFC V2:
- New patch

 drivers/vfio/pci/vfio_pci_intrs.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

Patch

diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index cdb6f875271f..ad3f9c1baccc 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -346,14 +346,15 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
+static int vfio_msi_enable(struct vfio_pci_intr_ctx *intr_ctx, int nvec, bool msix)
 {
+	struct vfio_pci_core_device *vdev = intr_ctx->priv;
 	struct pci_dev *pdev = vdev->pdev;
 	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
 	int ret;
 	u16 cmd;
 
-	if (!is_irq_none(&vdev->intr_ctx))
+	if (!is_irq_none(intr_ctx))
 		return -EINVAL;
 
 	/* return the number of supported vectors if we can't get all: */
@@ -367,7 +368,7 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
 	}
 	vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
-	vdev->intr_ctx.irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
+	intr_ctx->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
 				VFIO_PCI_MSI_IRQ_INDEX;
 
 	if (!msix) {
@@ -523,14 +524,15 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev,
 	return ret;
 }
 
-static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
+static void vfio_msi_disable(struct vfio_pci_intr_ctx *intr_ctx, bool msix)
 {
+	struct vfio_pci_core_device *vdev = intr_ctx->priv;
 	struct pci_dev *pdev = vdev->pdev;
 	struct vfio_pci_irq_ctx *ctx;
 	unsigned long i;
 	u16 cmd;
 
-	xa_for_each(&vdev->intr_ctx.ctx, i, ctx) {
+	xa_for_each(&intr_ctx->ctx, i, ctx) {
 		vfio_virqfd_disable(&ctx->unmask);
 		vfio_virqfd_disable(&ctx->mask);
 		vfio_msi_set_vector_signal(vdev, i, -1, msix);
@@ -547,7 +549,7 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
 	if (vdev->nointx)
 		pci_intx(pdev, 0);
 
-	vdev->intr_ctx.irq_type = VFIO_PCI_NUM_IRQS;
+	intr_ctx->irq_type = VFIO_PCI_NUM_IRQS;
 }
 
 /*
@@ -667,7 +669,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_intr_ctx *intr_ctx,
 	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
 
 	if (irq_is(intr_ctx, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
-		vfio_msi_disable(vdev, msix);
+		vfio_msi_disable(intr_ctx, msix);
 		return 0;
 	}
 
@@ -682,13 +684,13 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_intr_ctx *intr_ctx,
 			return vfio_msi_set_block(vdev, start, count,
 						  fds, msix);
 
-		ret = vfio_msi_enable(vdev, start + count, msix);
+		ret = vfio_msi_enable(intr_ctx, start + count, msix);
 		if (ret)
 			return ret;
 
 		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
 		if (ret)
-			vfio_msi_disable(vdev, msix);
+			vfio_msi_disable(intr_ctx, msix);
 
 		return ret;
 	}
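
For completeness, the hunks above recover the backend device through
the context's priv pointer (intr_ctx->priv). A minimal sketch of how
that association could be established; the initialization site shown
here is an assumption, not part of this patch:

	/*
	 * Illustrative only: point the interrupt context at the core
	 * device so backend callbacks can recover it via intr_ctx->priv.
	 */
	static void vfio_pci_init_intr_ctx(struct vfio_pci_core_device *vdev)
	{
		vdev->intr_ctx.priv = vdev;
	}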