diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -2166,7 +2166,7 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
 	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
 	init_rwsem(&vdev->memory_lock);
 	xa_init(&vdev->ctx);
-	vdev->intr_ctx.priv = vdev;
+	vfio_pci_init_intr_ctx(vdev, &vdev->intr_ctx);
 
 	return 0;
 }
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -796,6 +796,23 @@ static int vfio_pci_set_req_trigger(struct vfio_pci_intr_ctx *intr_ctx,
 					    count, flags, data);
 }
 
+static struct vfio_pci_intr_ops vfio_pci_intr_ops = {
+	.set_intx_mask = vfio_pci_set_intx_mask,
+	.set_intx_unmask = vfio_pci_set_intx_unmask,
+	.set_intx_trigger = vfio_pci_set_intx_trigger,
+	.set_msi_trigger = vfio_pci_set_msi_trigger,
+	.set_err_trigger = vfio_pci_set_err_trigger,
+	.set_req_trigger = vfio_pci_set_req_trigger,
+};
+
+void vfio_pci_init_intr_ctx(struct vfio_pci_core_device *vdev,
+			    struct vfio_pci_intr_ctx *intr_ctx)
+{
+	intr_ctx->ops = &vfio_pci_intr_ops;
+	intr_ctx->priv = vdev;
+}
+EXPORT_SYMBOL_GPL(vfio_pci_init_intr_ctx);
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_intr_ctx *intr_ctx, uint32_t flags,
 			    unsigned int index, unsigned int start,
 			    unsigned int count, void *data)
@@ -808,13 +825,16 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_intr_ctx *intr_ctx, uint32_t flags,
 	case VFIO_PCI_INTX_IRQ_INDEX:
 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
 		case VFIO_IRQ_SET_ACTION_MASK:
-			func = vfio_pci_set_intx_mask;
+			if (intr_ctx->ops->set_intx_mask)
+				func = intr_ctx->ops->set_intx_mask;
 			break;
 		case VFIO_IRQ_SET_ACTION_UNMASK:
-			func = vfio_pci_set_intx_unmask;
+			if (intr_ctx->ops->set_intx_unmask)
+				func = intr_ctx->ops->set_intx_unmask;
 			break;
 		case VFIO_IRQ_SET_ACTION_TRIGGER:
-			func = vfio_pci_set_intx_trigger;
+			if (intr_ctx->ops->set_intx_trigger)
+				func = intr_ctx->ops->set_intx_trigger;
 			break;
 		}
 		break;
@@ -826,21 +846,24 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_intr_ctx *intr_ctx, uint32_t flags,
 			/* XXX Need masking support exported */
 			break;
 		case VFIO_IRQ_SET_ACTION_TRIGGER:
-			func = vfio_pci_set_msi_trigger;
+			if (intr_ctx->ops->set_msi_trigger)
+				func = intr_ctx->ops->set_msi_trigger;
 			break;
 		}
 		break;
 	case VFIO_PCI_ERR_IRQ_INDEX:
 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
 		case VFIO_IRQ_SET_ACTION_TRIGGER:
-			func = vfio_pci_set_err_trigger;
+			if (intr_ctx->ops->set_err_trigger)
+				func = intr_ctx->ops->set_err_trigger;
 			break;
 		}
 		break;
 	case VFIO_PCI_REQ_IRQ_INDEX:
 		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
 		case VFIO_IRQ_SET_ACTION_TRIGGER:
-			func = vfio_pci_set_req_trigger;
+			if (intr_ctx->ops->set_req_trigger)
+				func = intr_ctx->ops->set_req_trigger;
 			break;
 		}
 		break;
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -51,12 +51,35 @@ struct vfio_pci_region {
 /*
  * Interrupt context of virtual PCI device
+ * @ops:	Interrupt management backend functions
  * @priv:	Private data of interrupt management backend
  */
 struct vfio_pci_intr_ctx {
+	const struct vfio_pci_intr_ops *ops;
 	void *priv;
 };
 
+struct vfio_pci_intr_ops {
+	int (*set_intx_mask)(struct vfio_pci_intr_ctx *intr_ctx,
+			     unsigned int index, unsigned int start,
+			     unsigned int count, uint32_t flags, void *data);
+	int (*set_intx_unmask)(struct vfio_pci_intr_ctx *intr_ctx,
+			       unsigned int index, unsigned int start,
+			       unsigned int count, uint32_t flags, void *data);
+	int (*set_intx_trigger)(struct vfio_pci_intr_ctx *intr_ctx,
+				unsigned int index, unsigned int start,
+				unsigned int count, uint32_t flags, void *data);
+	int (*set_msi_trigger)(struct vfio_pci_intr_ctx *intr_ctx,
+			       unsigned int index, unsigned int start,
+			       unsigned int count, uint32_t flags, void *data);
+	int (*set_err_trigger)(struct vfio_pci_intr_ctx *intr_ctx,
+			       unsigned int index, unsigned int start,
+			       unsigned int count, uint32_t flags, void *data);
+	int (*set_req_trigger)(struct vfio_pci_intr_ctx *intr_ctx,
+			       unsigned int index, unsigned int start,
+			       unsigned int count, uint32_t flags, void *data);
+};
+
 struct vfio_pci_core_device {
 	struct vfio_device	vdev;
 	struct pci_dev		*pdev;
@@ -124,6 +147,8 @@ int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
 				  int nr_virtfn);
 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
 			 unsigned long arg);
+void vfio_pci_init_intr_ctx(struct vfio_pci_core_device *vdev,
+			    struct vfio_pci_intr_ctx *intr_ctx);
 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
 				void __user *arg, size_t argsz);
 ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,

VFIO PCI interrupt management supports passthrough PCI devices where an
interrupt in the guest is backed by the same type of interrupt on the
PCI device. Interrupt management can be more flexible: an interrupt in
the guest may be backed by a different type of interrupt on the host,
for example MSI-X in the guest can be backed by IMS on the host, or not
backed by a device interrupt at all when the interrupt is emulated by
the virtual device driver.

The main entry to guest interrupt management is the VFIO_DEVICE_SET_IRQS
ioctl(). By default the ioctl() hands the work to the interrupt
management backend for PCI devices, calling the PCI specific functions
directly. Make the ioctl() configurable to support different interrupt
management backends. This is accomplished by introducing interrupt
context specific callbacks that are initialized by the virtual device
driver and then invoked via the ioctl().

The introduction of virtual device driver specific callbacks requires
their initialization. Create a dedicated interrupt context
initialization function to avoid mixing further interrupt context
initialization with general virtual device driver initialization.

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
Changes since RFC V2:
- Improve changelog and comments.
- Make vfio_pci_intr_ops static.

 drivers/vfio/pci/vfio_pci_core.c  |  2 +-
 drivers/vfio/pci/vfio_pci_intrs.c | 35 +++++++++++++++++++++++++------
 include/linux/vfio_pci_core.h     | 25 ++++++++++++++++++++++
 3 files changed, 55 insertions(+), 7 deletions(-)
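
For illustration only, and not part of this patch: a minimal sketch of
how a virtual device driver with its own interrupt backend could plug
into the new callbacks. Every "my_virt_*" name below is hypothetical;
only struct vfio_pci_intr_ctx, struct vfio_pci_intr_ops, and the
dispatch behavior of vfio_pci_set_irqs_ioctl() come from this patch.

#include <linux/vfio_pci_core.h>

/* Hypothetical driver state; any backend private type works here. */
struct my_virt_device {
	struct vfio_pci_core_device core;
	/* ... backend specific interrupt state ... */
};

static int my_virt_set_msi_trigger(struct vfio_pci_intr_ctx *intr_ctx,
				   unsigned int index, unsigned int start,
				   unsigned int count, uint32_t flags,
				   void *data)
{
	/* ->priv is whatever the backend stored at init time. */
	struct my_virt_device *mydev = intr_ctx->priv;

	/*
	 * Back the guest MSI-X vector with host IMS, or emulate it
	 * entirely in the driver - the ioctl() dispatch does not care.
	 */
	dev_dbg(&mydev->core.pdev->dev,
		"trigger index %u start %u count %u\n",
		index, start, count);
	return 0;
}

static const struct vfio_pci_intr_ops my_virt_intr_ops = {
	/*
	 * Callbacks left NULL leave func NULL in
	 * vfio_pci_set_irqs_ioctl(), so unsupported actions fail
	 * cleanly for this device.
	 */
	.set_msi_trigger = my_virt_set_msi_trigger,
};

/* The backend's counterpart of vfio_pci_init_intr_ctx(). */
static void my_virt_init_intr_ctx(struct my_virt_device *mydev,
				  struct vfio_pci_intr_ctx *intr_ctx)
{
	intr_ctx->ops = &my_virt_intr_ops;
	intr_ctx->priv = mydev;
}

Such a driver would call my_virt_init_intr_ctx() from its init_dev
callback instead of vfio_pci_init_intr_ctx(), after which
VFIO_DEVICE_SET_IRQS requests reach the backend through intr_ctx->ops
with no further changes to the core ioctl() path.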