@@ -143,6 +143,35 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
+/*
+ * vfio_device_iommu_map - map a physical range for a vfio device
+ * @device: vfio device whose group's container backs the mapping
+ * @iova:   I/O virtual address to map at
+ * @paddr:  physical address to map
+ * @size:   size of the mapping in bytes
+ * @prot:   IOMMU_READ/IOMMU_WRITE protection flags
+ *
+ * Forwards to the container's IOMMU backend via its device_map op.
+ * Returns 0 on success, -EINVAL if the backend provides no device_map
+ * op, or the op's error code.
+ *
+ * NOTE(review): device->group->container and container->iommu_driver are
+ * dereferenced without NULL checks -- assumes the caller holds a reference
+ * guaranteeing the device is attached to a container with a driver set;
+ * confirm against callers.
+ */
+int vfio_device_iommu_map(struct vfio_device *device, unsigned long iova,
+			  phys_addr_t paddr, size_t size, int prot)
+{
+	struct vfio_container *container = device->group->container;
+	const struct vfio_iommu_driver_ops *ops = container->iommu_driver->ops;
+	int ret;
+
+	/* Backend support for per-device mappings is optional. */
+	if (!ops->device_map)
+		return -EINVAL;
+
+	ret = ops->device_map(container->iommu_data, iova, paddr, size, prot);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_device_iommu_map);
+
+/*
+ * vfio_device_iommu_unmap - tear down a mapping created by
+ * vfio_device_iommu_map()
+ * @device: vfio device whose group's container backs the mapping
+ * @iova:   I/O virtual address of the mapping
+ * @size:   size of the mapping in bytes
+ *
+ * Silently does nothing if the container's IOMMU backend provides no
+ * device_unmap op (mirrors the -EINVAL case of the map path).
+ *
+ * NOTE(review): as with the map path, container/iommu_driver are assumed
+ * non-NULL here -- verify caller guarantees.
+ */
+void vfio_device_iommu_unmap(struct vfio_device *device, unsigned long iova,
+			     size_t size)
+{
+	struct vfio_container *container = device->group->container;
+	const struct vfio_iommu_driver_ops *ops = container->iommu_driver->ops;
+
+	if (!ops->device_unmap)
+		return;
+
+	ops->device_unmap(container->iommu_data, iova, size);
+}
+EXPORT_SYMBOL_GPL(vfio_device_iommu_unmap);
+
/**
* Group minor allocation/free - both called with vfio.group_lock held
*/
@@ -1025,6 +1025,64 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return -ENOTTY;
}
+/*
+ * device_map backend for type1: establish the iova->paddr mapping in
+ * every IOMMU domain of the container, under iommu->lock.
+ *
+ * Domains where @iova already translates to something are skipped --
+ * presumably the mapping already exists there; TODO confirm intent.
+ * On a hard failure the mapping is unwound from all domains visited
+ * earlier and the error is returned.
+ *
+ * NOTE(review): two suspicious points worth confirming:
+ *  1. -EBUSY from iommu_map() is tolerated (loop continues) and the
+ *     function still returns 0, discarding ret -- intentional?
+ *  2. The unwind path walks ALL previously visited domains, including
+ *     ones skipped via the iommu_iova_to_phys() check, so it may unmap
+ *     mappings this call did not create.
+ */
+static int vfio_iommu_type1_device_map(void *iommu_data, unsigned long iova,
+				       phys_addr_t paddr, size_t size,
+				       int prot)
+{
+	struct vfio_iommu *iommu = iommu_data;
+	struct vfio_domain *d;
+	int ret;
+
+	mutex_lock(&iommu->lock);
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+
+		/* Skip domains where the iova is already mapped. */
+		if (iommu_iova_to_phys(d->domain, iova))
+			continue;
+
+		/* Honor the domain's own protection attributes too. */
+		ret = iommu_map(d->domain, iova, paddr,
+				size, prot | d->prot);
+
+		if (ret) {
+			if (ret != -EBUSY)
+				goto unwind;
+		}
+
+		/* Mapping many domains can be slow; be scheduler-friendly. */
+		cond_resched();
+	}
+
+	mutex_unlock(&iommu->lock);
+
+	return 0;
+
+unwind:
+	/* Roll back the domains handled before the failing one. */
+	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+		iommu_unmap(d->domain, iova, size);
+
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
+/*
+ * device_unmap backend for type1: remove the mapping at @iova from every
+ * IOMMU domain of the container, under iommu->lock.
+ *
+ * Domains where @iova does not currently translate are skipped, so a
+ * partial map (or a racing unmap) does not trigger spurious unmaps.
+ */
+static void vfio_iommu_type1_device_unmap(void *iommu_data, unsigned long iova,
+					  size_t size)
+{
+	struct vfio_iommu *iommu = iommu_data;
+	struct vfio_domain *d;
+
+	mutex_lock(&iommu->lock);
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+
+		/* Nothing mapped here -- nothing to tear down. */
+		if (!iommu_iova_to_phys(d->domain, iova))
+			continue;
+
+		iommu_unmap(d->domain, iova, size);
+		/* Keep latency reasonable across many domains. */
+		cond_resched();
+	}
+
+	mutex_unlock(&iommu->lock);
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -1033,6 +1091,8 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.ioctl = vfio_iommu_type1_ioctl,
.attach_group = vfio_iommu_type1_attach_group,
.detach_group = vfio_iommu_type1_detach_group,
+ .device_map = vfio_iommu_type1_device_map,
+ .device_unmap = vfio_iommu_type1_device_unmap,
};
static int __init vfio_iommu_type1_init(void)
@@ -52,6 +52,12 @@ extern void *vfio_del_group_dev(struct device *dev);
extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device);
extern void *vfio_device_data(struct vfio_device *device);
+extern int vfio_device_iommu_map(struct vfio_device *device,
+ unsigned long iova,
+ phys_addr_t paddr,
+ size_t size, int prot);
+extern void vfio_device_iommu_unmap(struct vfio_device *device,
+ unsigned long iova, size_t size);
/**
* struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
@@ -72,7 +78,10 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
-
+ int (*device_map)(void *iommu_data, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ void (*device_unmap)(void *iommu_data, unsigned long iova,
+ size_t size);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);