@@ -1168,7 +1168,7 @@ static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
flags = le16_to_cpu(*pflags);
/* MSI is enabled via ioctl */
- if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX)
+ if (vdev->intr_ctx.irq_type != VFIO_PCI_MSI_IRQ_INDEX)
flags &= ~PCI_MSI_FLAGS_ENABLE;
/* Check queue size */
@@ -427,7 +427,7 @@ static int vfio_pci_core_runtime_suspend(struct device *dev)
* vfio_pci_intx_mask() will return false and in that case, INTx
* should not be unmasked in the runtime resume.
*/
- vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
+ vdev->pm_intx_masked = ((vdev->intr_ctx.irq_type == VFIO_PCI_INTX_IRQ_INDEX) &&
vfio_pci_intx_mask(vdev));
return 0;
@@ -596,7 +596,7 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
vfio_pci_set_irqs_ioctl(&vdev->intr_ctx, VFIO_IRQ_SET_DATA_NONE |
VFIO_IRQ_SET_ACTION_TRIGGER,
- vdev->irq_type, 0, 0, NULL);
+ vdev->intr_ctx.irq_type, 0, 0, NULL);
/* Device closed, don't need mutex here */
list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
@@ -2153,7 +2153,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
container_of(core_vdev, struct vfio_pci_core_device, vdev);
vdev->pdev = to_pci_dev(core_vdev->dev);
- vdev->irq_type = VFIO_PCI_NUM_IRQS;
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->dummy_resources_list);
@@ -33,19 +33,19 @@ struct vfio_pci_irq_ctx {
static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
- return vdev->irq_type == type;
+ return vdev->intr_ctx.irq_type == type;
}
static bool is_intx(struct vfio_pci_core_device *vdev)
{
- return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
+ return vdev->intr_ctx.irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}
static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
- return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
- vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
- vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+ return !(vdev->intr_ctx.irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
+ vdev->intr_ctx.irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
+ vdev->intr_ctx.irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}
static
@@ -255,7 +255,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
if (vdev->pci_2_3)
pci_intx(vdev->pdev, !ctx->masked);
- vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ vdev->intr_ctx.irq_type = VFIO_PCI_INTX_IRQ_INDEX;
return 0;
}
@@ -331,7 +331,7 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
vfio_virqfd_disable(&ctx->mask);
}
vfio_intx_set_signal(vdev, -1);
- vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->intr_ctx.irq_type = VFIO_PCI_NUM_IRQS;
vfio_irq_ctx_free(vdev, ctx, 0);
}
@@ -367,7 +367,7 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
}
vfio_pci_memory_unlock_and_restore(vdev, cmd);
- vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
+ vdev->intr_ctx.irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
VFIO_PCI_MSI_IRQ_INDEX;
if (!msix) {
@@ -547,7 +547,7 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
if (vdev->nointx)
pci_intx(pdev, 0);
- vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->intr_ctx.irq_type = VFIO_PCI_NUM_IRQS;
}
/*
@@ -677,7 +677,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_intr_ctx *intr_ctx,
int32_t *fds = data;
int ret;
- if (vdev->irq_type == index)
+ if (vdev->intr_ctx.irq_type == index)
return vfio_msi_set_block(vdev, start, count,
fds, msix);
@@ -807,6 +807,7 @@ static struct vfio_pci_intr_ops vfio_pci_intr_ops = {
void vfio_pci_init_intr_ctx(struct vfio_pci_core_device *vdev,
struct vfio_pci_intr_ctx *intr_ctx)
{
+ intr_ctx->irq_type = VFIO_PCI_NUM_IRQS;
intr_ctx->ops = &vfio_pci_intr_ops;
intr_ctx->priv = vdev;
mutex_init(&intr_ctx->igate);
@@ -57,6 +57,7 @@ struct vfio_pci_region {
* @err_trigger: Eventfd associated with error reporting IRQ
* @req_trigger: Eventfd associated with device request notification
* @ctx: Per-interrupt context indexed by vector
+ * @irq_type: Type of interrupt from guest perspective
*/
struct vfio_pci_intr_ctx {
const struct vfio_pci_intr_ops *ops;
@@ -65,6 +66,7 @@ struct vfio_pci_intr_ctx {
struct eventfd_ctx *err_trigger;
struct eventfd_ctx *req_trigger;
struct xarray ctx;
+ int irq_type;
};
struct vfio_pci_intr_ops {
@@ -100,7 +102,6 @@ struct vfio_pci_core_device {
u8 *vconfig;
struct perm_bits *msi_perm;
spinlock_t irqlock;
- int irq_type;
int num_regions;
struct vfio_pci_region *region;
u8 msi_qmax;
The type of interrupts within the guest is not unique to PCI devices and is needed for other virtual devices supporting interrupts. Move the interrupt type to the generic interrupt context struct vfio_pci_intr_ctx. Signed-off-by: Reinette Chatre <reinette.chatre@intel.com> --- Question for maintainers: irq_type is accessed in ioctl() flow as well as other flows. It is not clear to me how it is protected against concurrent access. Should accesses outside of ioctl() flow take the mutex? No changes since RFC V2. drivers/vfio/pci/vfio_pci_config.c | 2 +- drivers/vfio/pci/vfio_pci_core.c | 5 ++--- drivers/vfio/pci/vfio_pci_intrs.c | 21 +++++++++++---------- include/linux/vfio_pci_core.h | 3 ++- 4 files changed, 16 insertions(+), 15 deletions(-)