@@ -6,6 +6,7 @@
#include <linux/iommu.h>
#include <linux/irqdomain.h>
+#include "io_pagetable.h"
#include "iommufd_private.h"
/*
@@ -25,6 +26,15 @@ struct iommufd_device {
bool enforce_cache_coherency;
};
+struct iommufd_access {
+ struct iommufd_object obj;
+ struct iommufd_ctx *ictx;
+ struct iommufd_ioas *ioas;
+ const struct iommufd_access_ops *ops;
+ void *data;
+ u32 ioas_access_list_id;
+};
+
void iommufd_device_destroy(struct iommufd_object *obj)
{
struct iommufd_device *idev =
@@ -397,3 +407,300 @@ void iommufd_device_detach(struct iommufd_device *idev)
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
+
+void iommufd_access_destroy_object(struct iommufd_object *obj)
+{
+ struct iommufd_access *access =
+ container_of(obj, struct iommufd_access, obj);
+
+ WARN_ON(xa_erase(&access->ioas->access_list,
+ access->ioas_access_list_id) != access);
+ iommufd_ctx_put(access->ictx);
+ refcount_dec(&access->ioas->obj.users);
+}
+
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
+ const struct iommufd_access_ops *ops, void *data)
+{
+ struct iommufd_access *access;
+ struct iommufd_object *obj;
+ int rc;
+
+ /*
+ * There is no uAPI for the access object, but to keep things symmetric
+ * use the object infrastructure anyhow.
+ */
+ access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+ if (IS_ERR(access))
+ return access;
+
+ obj = iommufd_get_object(ictx, ioas_id, IOMMUFD_OBJ_IOAS);
+ if (IS_ERR(obj)) {
+ rc = PTR_ERR(obj);
+ goto out_abort;
+ }
+ access->ioas = container_of(obj, struct iommufd_ioas, obj);
+ iommufd_put_object_keep_user(obj);
+
+ rc = xa_alloc(&access->ioas->access_list, &access->ioas_access_list_id,
+ access, xa_limit_16b, GFP_KERNEL_ACCOUNT);
+ if (rc)
+ goto out_put_ioas;
+
+ /* The calling driver is a user until iommufd_access_destroy() */
+ refcount_inc(&access->obj.users);
+ access->ictx = ictx;
+ access->data = data;
+ iommufd_ctx_get(ictx);
+ iommufd_object_finalize(ictx, &access->obj);
+ return access;
+out_put_ioas:
+ refcount_dec(&access->ioas->obj.users);
+out_abort:
+ iommufd_object_abort(ictx, &access->obj);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
+
+void iommufd_access_destroy(struct iommufd_access *access)
+{
+ bool was_destroyed;
+
+ was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
+ WARN_ON(!was_destroyed);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+
+/**
+ * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
+ * @iopt: iopt to work on
+ * @iova: Starting iova in the iopt
+ * @length: Number of bytes
+ *
+ * After this function returns there should be no users attached to the pages
+ * linked to this iopt that intersect with iova/length. Anyone that has attached
+ * a user through iommufd_access_pin_pages() needs to detach it through
+ * iommufd_access_unpin_pages() before this function returns.
+ *
+ * The unmap callback may not call or wait for an iommufd_access_destroy() to
+ * complete. Once iommufd_access_destroy() returns, no ops are running and no
+ * future ops will be called.
+ */
+void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
+ unsigned long length)
+{
+ struct iommufd_ioas *ioas =
+ container_of(iopt, struct iommufd_ioas, iopt);
+ struct iommufd_access *access;
+ unsigned long index;
+
+ xa_lock(&ioas->access_list);
+ xa_for_each(&ioas->access_list, index, access) {
+ if (!iommufd_lock_obj(&access->obj))
+ continue;
+ xa_unlock(&ioas->access_list);
+
+ access->ops->unmap(access->data, iova, length);
+
+ iommufd_put_object(&access->obj);
+ xa_lock(&ioas->access_list);
+ }
+ xa_unlock(&ioas->access_list);
+}
+
+/**
+ * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length: Number of bytes to access
+ *
+ * Release the pinned struct pages. The caller must stop accessing them before
+ * calling this. The iova/length must exactly match the ones provided to
+ * iommufd_access_pin_pages().
+ */
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length)
+{
+ struct io_pagetable *iopt = &access->ioas->iopt;
+ struct iopt_area_contig_iter iter;
+ unsigned long last_iova;
+ struct iopt_area *area;
+
+ if (WARN_ON(!length) ||
+ WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
+ return;
+
+ down_read(&iopt->iova_rwsem);
+ iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
+ iopt_pages_remove_access(
+ area, iopt_area_iova_to_index(area, iter.cur_iova),
+ iopt_area_iova_to_index(
+ area,
+ min(last_iova, iopt_area_last_iova(area))));
+ up_read(&iopt->iova_rwsem);
+ WARN_ON(!iopt_area_contig_done(&iter));
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
+
+static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter,
+ bool first)
+{
+ if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
+ return false;
+
+ if (!iopt_area_contig_done(iter) &&
+ (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
+ PAGE_SIZE) != (PAGE_SIZE - 1))
+ return false;
+ return true;
+}
+
+static bool check_area_prot(struct iopt_area *area, unsigned int flags)
+{
+ if (flags & IOMMUFD_ACCESS_RW_WRITE)
+ return area->iommu_prot & IOMMU_WRITE;
+ return area->iommu_prot & IOMMU_READ;
+}
+
+/**
+ * iommufd_access_pin_pages() - Return a list of pages under the iova
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length: Number of bytes to access
+ * @out_pages: Output page list
+ * @flags: IOMMUFD_ACCESS_RW_* flags
+ *
+ * Reads @length bytes starting at iova and returns the struct page * pointers.
+ * These can be kmap'd by the caller for CPU access.
+ *
+ * The caller must perform iommufd_access_unpin_pages() when done to balance this.
+ *
+ * This API always requires a page-aligned iova. This happens naturally if the
+ * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However,
+ * smaller alignments have corner cases where this API can fail on an otherwise
+ * aligned iova.
+ */
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags)
+{
+ struct io_pagetable *iopt = &access->ioas->iopt;
+ struct iopt_area_contig_iter iter;
+ unsigned long last_iova;
+ struct iopt_area *area;
+ bool first = true;
+ int rc;
+
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(iova, length - 1, &last_iova))
+ return -EOVERFLOW;
+
+ down_read(&iopt->iova_rwsem);
+ iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
+ unsigned long last = min(last_iova, iopt_area_last_iova(area));
+ unsigned long last_index = iopt_area_iova_to_index(area, last);
+ unsigned long index =
+ iopt_area_iova_to_index(area, iter.cur_iova);
+
+ if (area->prevent_access ||
+ !iopt_area_contig_is_aligned(&iter, first)) {
+ rc = -EINVAL;
+ goto err_remove;
+ }
+
+ if (!check_area_prot(area, flags)) {
+ rc = -EPERM;
+ goto err_remove;
+ }
+ first = false;
+
+ mutex_lock(&area->pages->mutex);
+ rc = iopt_pages_add_access(area->pages, index, last_index,
+ out_pages, flags);
+ if (rc) {
+ mutex_unlock(&area->pages->mutex);
+ goto err_remove;
+ }
+ area->num_accesses++;
+ mutex_unlock(&area->pages->mutex);
+ out_pages += last_index - index + 1;
+ }
+ if (!iopt_area_contig_done(&iter)) {
+ rc = -ENOENT;
+ goto err_remove;
+ }
+
+ up_read(&iopt->iova_rwsem);
+ return 0;
+
+err_remove:
+ if (iova < iter.cur_iova) {
+ last_iova = iter.cur_iova - 1;
+ iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
+ iopt_pages_remove_access(
+ area,
+ iopt_area_iova_to_index(area, iter.cur_iova),
+ iopt_area_iova_to_index(
+ area, min(last_iova,
+ iopt_area_last_iova(area))));
+ }
+ up_read(&iopt->iova_rwsem);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
+
+/**
+ * iommufd_access_rw - Read or write data under the iova
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @data: Kernel buffer to copy to/from
+ * @length: Number of bytes to access
+ * @flags: IOMMUFD_ACCESS_RW_* flags
+ *
+ * Copy kernel data to/from the range given by IOVA/length. If flags
+ * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
+ * by changing it into copy_to/from_user().
+ */
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t length, unsigned int flags)
+{
+ struct io_pagetable *iopt = &access->ioas->iopt;
+ struct iopt_area_contig_iter iter;
+ struct iopt_area *area;
+ unsigned long last_iova;
+ int rc;
+
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(iova, length - 1, &last_iova))
+ return -EOVERFLOW;
+
+ down_read(&iopt->iova_rwsem);
+ iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
+ unsigned long last = min(last_iova, iopt_area_last_iova(area));
+ unsigned long bytes = (last - iter.cur_iova) + 1;
+
+ if (area->prevent_access) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (!check_area_prot(area, flags)) {
+ rc = -EPERM;
+ goto err_out;
+ }
+
+ rc = iopt_pages_rw_access(
+ area->pages, iopt_area_start_byte(area, iter.cur_iova),
+ data, bytes, flags);
+ if (rc)
+ goto err_out;
+ data += bytes;
+ }
+ if (!iopt_area_contig_done(&iter))
+ rc = -ENOENT;
+err_out:
+ up_read(&iopt->iova_rwsem);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
@@ -463,6 +463,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
* is NULL. This prevents domain attach/detatch from running
* concurrently with cleaning up the area.
*/
+again:
down_read(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
while ((area = iopt_area_iter_first(iopt, start, end))) {
@@ -491,8 +492,10 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
area->prevent_access = true;
up_write(&iopt->iova_rwsem);
up_read(&iopt->domains_rwsem);
- /* Later patch calls back to drivers to unmap */
- return -EBUSY;
+ iommufd_access_notify_unmap(iopt, area_first,
+ iopt_area_length(area));
+ WARN_ON(READ_ONCE(area->num_accesses));
+ goto again;
}
pages = area->pages;
@@ -17,6 +17,7 @@ void iommufd_ioas_destroy(struct iommufd_object *obj)
rc = iopt_unmap_all(&ioas->iopt, NULL);
WARN_ON(rc && rc != -ENOENT);
iopt_destroy_table(&ioas->iopt);
+ WARN_ON(!xa_empty(&ioas->access_list));
mutex_destroy(&ioas->mutex);
}
@@ -35,6 +36,7 @@ struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx)
INIT_LIST_HEAD(&ioas->hwpt_list);
mutex_init(&ioas->mutex);
+ xa_init_flags(&ioas->access_list, XA_FLAGS_ALLOC);
return ioas;
out_abort:
@@ -64,6 +64,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
+void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
+ unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
@@ -105,6 +107,7 @@ enum iommufd_object_type {
IOMMUFD_OBJ_DEVICE,
IOMMUFD_OBJ_HW_PAGETABLE,
IOMMUFD_OBJ_IOAS,
+ IOMMUFD_OBJ_ACCESS,
};
/* Base struct for all objects with a userspace ID handle. */
@@ -195,6 +198,7 @@ struct iommufd_ioas {
struct io_pagetable iopt;
struct mutex mutex;
struct list_head hwpt_list;
+ struct xarray access_list;
};
static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ucmd *ucmd,
@@ -243,4 +247,6 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
+void iommufd_access_destroy_object(struct iommufd_object *obj);
+
#endif
@@ -352,6 +352,9 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
static struct iommufd_object_ops iommufd_object_ops[] = {
+ [IOMMUFD_OBJ_ACCESS] = {
+ .destroy = iommufd_access_destroy_object,
+ },
[IOMMUFD_OBJ_DEVICE] = {
.destroy = iommufd_device_destroy,
},
@@ -9,10 +9,12 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/device.h>
+struct device;
struct iommufd_device;
+struct page;
struct iommufd_ctx;
+struct iommufd_access;
struct file;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
@@ -26,6 +28,10 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id,
unsigned int flags);
void iommufd_device_detach(struct iommufd_device *idev);
+struct iommufd_access_ops {
+ void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
enum {
IOMMUFD_ACCESS_RW_READ = 0,
IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
@@ -33,11 +39,24 @@ enum {
IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
};
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
+ const struct iommufd_access_ops *ops, void *data);
+void iommufd_access_destroy(struct iommufd_access *access);
+
void iommufd_ctx_get(struct iommufd_ctx *ictx);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
void iommufd_ctx_put(struct iommufd_ctx *ictx);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags);
#else /* !CONFIG_IOMMUFD */
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
@@ -47,5 +66,26 @@ static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
{
}
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length,
+ struct page **out_pages,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_IOMMUFD */
#endif
Kernel access is the mode that VFIO "mdevs" use. In this case there is no
struct device and no IOMMU connection. iommufd acts as a record keeper for
accesses and returns the actual struct pages back to the caller to use however
they need, e.g. with kmap or the DMA API.

Each caller must create a struct iommufd_access with iommufd_access_create(),
similar to how iommufd_device_bind() works. Using this struct the caller can
access blocks of IOVA using iommufd_access_pin_pages() or iommufd_access_rw().

Callers must provide a callback that immediately unpins any IOVA being used
within a range. This happens if userspace unmaps the IOVA under the pin.

The implementation forwards the access requests directly to the iopt
infrastructure that manages the iopt_pages_access.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommufd/device.c          | 307 ++++++++++++++++++++++++
 drivers/iommu/iommufd/io_pagetable.c    |   7 +-
 drivers/iommu/iommufd/ioas.c            |   2 +
 drivers/iommu/iommufd/iommufd_private.h |   6 +
 drivers/iommu/iommufd/main.c            |   3 +
 include/linux/iommufd.h                 |  42 +++-
 6 files changed, 364 insertions(+), 3 deletions(-)
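
For orientation, the sketch below shows how a hypothetical mdev-style consumer
might wire up this kAPI: register an unmap callback via iommufd_access_create(),
pin and CPU-map a page, do a small copy with iommufd_access_rw(), and tear down
with iommufd_access_destroy(). The my_mdev_* names, the single tracked pin, and
the locking scheme are illustrative assumptions, not part of this patch.

/* Hypothetical mdev-style consumer of the kernel-access kAPI (sketch only) */
#include <linux/iommufd.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct my_mdev {
	struct iommufd_access *access;
	struct mutex lock;		/* protects the pinned_* fields */
	unsigned long pinned_iova;
	unsigned long pinned_length;	/* 0 when nothing is pinned */
	struct page *pinned_page;
};

/* iommufd invokes this when userspace unmaps IOVA that we may have pinned */
static void my_mdev_unmap(void *data, unsigned long iova, unsigned long length)
{
	struct my_mdev *mdev = data;
	unsigned long last = iova + length - 1;

	mutex_lock(&mdev->lock);
	/* Any pin intersecting [iova, last] must be dropped before returning */
	if (mdev->pinned_length && mdev->pinned_iova <= last &&
	    iova <= mdev->pinned_iova + mdev->pinned_length - 1) {
		iommufd_access_unpin_pages(mdev->access, mdev->pinned_iova,
					   mdev->pinned_length);
		mdev->pinned_length = 0;
	}
	mutex_unlock(&mdev->lock);
}

static const struct iommufd_access_ops my_mdev_access_ops = {
	.unmap = my_mdev_unmap,
};

static int my_mdev_bind(struct my_mdev *mdev, struct iommufd_ctx *ictx,
			u32 ioas_id)
{
	mutex_init(&mdev->lock);
	mdev->access = iommufd_access_create(ictx, ioas_id,
					     &my_mdev_access_ops, mdev);
	if (IS_ERR(mdev->access))
		return PTR_ERR(mdev->access);
	return 0;
}

/* Pin one page at a page-aligned iova and zero it through a CPU mapping */
static int my_mdev_pin_and_zero(struct my_mdev *mdev, unsigned long iova)
{
	void *va;
	int rc;

	mutex_lock(&mdev->lock);
	rc = iommufd_access_pin_pages(mdev->access, iova, PAGE_SIZE,
				      &mdev->pinned_page,
				      IOMMUFD_ACCESS_RW_WRITE);
	if (!rc) {
		mdev->pinned_iova = iova;
		mdev->pinned_length = PAGE_SIZE;
		va = kmap_local_page(mdev->pinned_page);
		memset(va, 0, PAGE_SIZE);
		kunmap_local(va);
	}
	mutex_unlock(&mdev->lock);
	return rc;
}

/* Small transfers can skip pinning and use iommufd_access_rw() directly */
static int my_mdev_read_desc(struct my_mdev *mdev, unsigned long iova,
			     void *buf, size_t len)
{
	return iommufd_access_rw(mdev->access, iova, buf, len,
				 IOMMUFD_ACCESS_RW_READ);
}

static void my_mdev_unbind(struct my_mdev *mdev)
{
	mutex_lock(&mdev->lock);
	if (mdev->pinned_length)
		iommufd_access_unpin_pages(mdev->access, mdev->pinned_iova,
					   mdev->pinned_length);
	mdev->pinned_length = 0;
	mutex_unlock(&mdev->lock);
	iommufd_access_destroy(mdev->access);
}

The key contract is that my_mdev_unmap() must drop any intersecting pin before
returning; in this toy example, holding the same mutex across pinning and the
unmap callback is the simplest way to satisfy that.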