--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -107,6 +107,21 @@ static int vfio_irq_ctx_alloc_num(struct vfio_pci_core_device *vdev,
return ret;
}
+static bool vfio_irq_ctx_range_allocated(struct vfio_pci_core_device *vdev,
+					 unsigned int start, unsigned int count)
+{
+	struct vfio_pci_irq_ctx *ctx;
+	unsigned int i;
+
+	for (i = start; i < start + count; i++) {
+		ctx = xa_load(&vdev->ctx, i);
+		if (!ctx)
+			return false;
+	}
+
+	return true;
+}
+
/*
* INTx
*/
@@ -270,8 +285,6 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
return -EINVAL;
}
- vdev->num_ctx = 1;
-
/*
* If the virtual interrupt is masked, restore it. Devices
* supporting DisINTx can be masked at the hardware level
@@ -358,7 +371,6 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
}
vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
- vdev->num_ctx = 0;
vfio_irq_ctx_free_all(vdev);
}
@@ -399,7 +411,6 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi
}
vfio_pci_memory_unlock_and_restore(vdev, cmd);
- vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
VFIO_PCI_MSI_IRQ_INDEX;
@@ -423,9 +434,6 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
int irq, ret;
u16 cmd;
-	if (vector >= vdev->num_ctx)
-		return -EINVAL;
-
ctx = vfio_irq_ctx_get(vdev, vector);
if (!ctx)
return -EINVAL;
@@ -500,7 +508,7 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
int i, ret = 0;
unsigned int j;
- if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
+ if (!vfio_irq_ctx_range_allocated(vdev, start, count))
return -EINVAL;
for (i = 0, j = start; i < count && !ret; i++, j++) {
@@ -541,7 +549,6 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
pci_intx(pdev, 0);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
- vdev->num_ctx = 0;
vfio_irq_ctx_free_all(vdev);
}
@@ -677,7 +684,10 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
return ret;
}
-	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
+	if (!irq_is(vdev, index))
+		return -EINVAL;
+
+	if (!vfio_irq_ctx_range_allocated(vdev, start, count))
return -EINVAL;
for (i = start; i < start + count; i++) {
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -60,7 +60,6 @@ struct vfio_pci_core_device {
spinlock_t irqlock;
struct mutex igate;
struct xarray ctx;
- int num_ctx;
int irq_type;
int num_regions;
struct vfio_pci_region *region;
struct vfio_pci_core_device::num_ctx counts how many interrupt contexts
have been allocated. When all interrupt contexts are allocated
simultaneously, num_ctx provides the upper bound of all vectors that can
be used as indices into the interrupt context array.

With the upcoming support for dynamic MSI-X the number of interrupt
contexts does not necessarily span the range of allocated interrupts.
Consequently, num_ctx is no longer a trusted upper bound for valid
indices.

Stop using num_ctx to determine whether a provided vector is valid; use
the existence of interrupt context directly. Introduce a helper that
ensures a provided interrupt range is allocated before any user-requested
action is taken. This maintains the existing behavior (early exit without
modifications) when user space attempts to modify a range of vectors that
includes unallocated interrupts. The checks that ensure that user space
provides a range of vectors that is valid for the device are untouched.

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
Existing behavior on error paths is not maintained for MSI-X when adding
support for dynamic MSI-X. Please see the maintainer comments associated
with "vfio/pci: Support dynamic MSI-x".

 drivers/vfio/pci/vfio_pci_intrs.c | 30 ++++++++++++++++++++----------
 include/linux/vfio_pci_core.h     |  1 -
 2 files changed, 20 insertions(+), 11 deletions(-)
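To illustrate the pattern this change establishes, below is a minimal
caller sketch; it is not part of the patch. The function name
vfio_pci_example_act_on_range() is hypothetical, while
vfio_irq_ctx_range_allocated(), vfio_irq_ctx_get() and
struct vfio_pci_core_device come from this series.

/*
 * Hypothetical caller (illustration only): act on the vector range
 * [start, start + count) only if every vector in it has an allocated
 * interrupt context.  With dynamic MSI-X the contexts stored in the
 * vdev->ctx xarray may be sparse, so existence is checked per vector
 * instead of comparing the range against a num_ctx upper bound.  Like
 * the existing callers, this would run serialized against other
 * interrupt configuration (vdev->igate held).
 */
static int vfio_pci_example_act_on_range(struct vfio_pci_core_device *vdev,
					 unsigned int start, unsigned int count)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;

	/* Early exit, before modifying anything, if part of the range is missing. */
	if (!vfio_irq_ctx_range_allocated(vdev, start, count))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		/* ... act on ctx ... */
	}

	return 0;
}

This mirrors how vfio_msi_set_block() and vfio_pci_set_msi_trigger() use
the helper in the hunks above.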