
[v5,14/19] iommufd: Add kAPI toward external drivers for kernel access

Message ID 14-v5-4001c2997bd0+30c-iommufd_jgg@nvidia.com (mailing list archive)
State Not Applicable
Series IOMMUFD Generic interface

Checks

Context: netdev/tree_selection
Check: success
Description: Not a local patch

Commit Message

Jason Gunthorpe Nov. 16, 2022, 9 p.m. UTC
Kernel access is the mode that VFIO "mdevs" use. In this case there is no
struct device and no IOMMU connection. iommufd acts as a record keeper for
accesses and returns the actual struct pages back to the caller to use
however they need, e.g. with kmap or the DMA API.

Each caller must create a struct iommufd_access with
iommufd_access_create(), similar to how iommufd_device_bind() works. Using
this struct the caller can access blocks of IOVA using
iommufd_access_pin_pages() or iommufd_access_rw().

Callers must provide a callback that immediately unpins any IOVA being
used within a range. This happens if userspace unmaps the IOVA under the
pin.

The implementation forwards the access requests directly to the iopt
infrastructure that manages the iopt_pages_access.
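
For illustration only (this sketch is not part of the patch, and the my_*
names are hypothetical), a driver is expected to use the kAPI roughly like
this:

#include <linux/err.h>
#include <linux/iommufd.h>
#include <linux/mm.h>

struct my_mdev {
	struct iommufd_access *access;
};

/*
 * Called by iommufd when userspace unmaps IOVA that intersects an active
 * pin. For simplicity this sketch unpins the whole notified range; a real
 * driver looks up its own pin records intersecting [iova, iova + length)
 * and unpins each with the exact iova/length it originally pinned.
 */
static void my_unmap(void *data, unsigned long iova, unsigned long length)
{
	struct my_mdev *mdev = data;

	iommufd_access_unpin_pages(mdev->access, iova, length);
}

static const struct iommufd_access_ops my_access_ops = {
	.needs_pin_pages = 1,
	.unmap = my_unmap,
};

static int my_mdev_open(struct my_mdev *mdev, struct iommufd_ctx *ictx,
			u32 ioas_id)
{
	mdev->access = iommufd_access_create(ictx, ioas_id, &my_access_ops,
					     mdev);
	return PTR_ERR_OR_ZERO(mdev->access);
}

static int my_mdev_pin_one(struct my_mdev *mdev, unsigned long iova,
			   struct page **page)
{
	/* iova must be PAGE_SIZE aligned for pinning */
	return iommufd_access_pin_pages(mdev->access, iova, PAGE_SIZE, page,
					IOMMUFD_ACCESS_RW_WRITE);
}

static int my_mdev_read(struct my_mdev *mdev, unsigned long iova, void *buf,
			size_t len)
{
	/* CPU copy from the IOVA range into a kernel buffer, no pinning */
	return iommufd_access_rw(mdev->access, iova, buf, len,
				 IOMMUFD_ACCESS_RW_READ);
}

static void my_mdev_close(struct my_mdev *mdev)
{
	iommufd_access_destroy(mdev->access);
}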

Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommufd/device.c          | 314 ++++++++++++++++++++++++
 drivers/iommu/iommufd/io_pagetable.c    |   8 +-
 drivers/iommu/iommufd/iommufd_private.h |  10 +
 drivers/iommu/iommufd/main.c            |   3 +
 include/linux/iommufd.h                 |  43 +++-
 5 files changed, 375 insertions(+), 3 deletions(-)

Comments

Eric Auger Nov. 28, 2022, 3:48 p.m. UTC | #1
Hi Jason,

On 11/16/22 22:00, Jason Gunthorpe wrote:
> Kernel access is the mode that VFIO "mdevs" use. In this case there is no
> struct device and no IOMMU connection. iommufd acts as a record keeper for
> accesses and returns the actual struct pages back to the caller to use
> however they need, e.g. with kmap or the DMA API.
>
> Each caller must create a struct iommufd_access with
> iommufd_access_create(), similar to how iommufd_device_bind() works. Using
> this struct the caller can access blocks of IOVA using
> iommufd_access_pin_pages() or iommufd_access_rw().
>
> Callers must provide a callback that immediately unpins any IOVA being
> used within a range. This happens if userspace unmaps the IOVA under the
> pin.
>
> The implementation forwards the access requests directly to the iopt
> infrastructure that manages the iopt_pages_access.
>
> Tested-by: Nicolin Chen <nicolinc@nvidia.com>
> Tested-by: Yi Liu <yi.l.liu@intel.com>
> Tested-by: Lixiao Yang <lixiao.yang@intel.com>
> Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>  drivers/iommu/iommufd/device.c          | 314 ++++++++++++++++++++++++
>  drivers/iommu/iommufd/io_pagetable.c    |   8 +-
>  drivers/iommu/iommufd/iommufd_private.h |  10 +
>  drivers/iommu/iommufd/main.c            |   3 +
>  include/linux/iommufd.h                 |  43 +++-
>  5 files changed, 375 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
> index a71f5740773f84..522469ae7b5770 100644
> --- a/drivers/iommu/iommufd/device.c
> +++ b/drivers/iommu/iommufd/device.c
> @@ -6,6 +6,7 @@
>  #include <linux/iommu.h>
>  #include <linux/irqdomain.h>
>  
> +#include "io_pagetable.h"
>  #include "iommufd_private.h"
>  
>  /*
> @@ -415,3 +416,316 @@ void iommufd_device_detach(struct iommufd_device *idev)
>  	refcount_dec(&idev->obj.users);
>  }
>  EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
> +
> +void iommufd_access_destroy_object(struct iommufd_object *obj)
> +{
> +	struct iommufd_access *access =
> +		container_of(obj, struct iommufd_access, obj);
> +
> +	iopt_remove_access(&access->ioas->iopt, access);
> +	iommufd_ctx_put(access->ictx);
> +	refcount_dec(&access->ioas->obj.users);
> +}
> +
> +/**
> + * iommufd_access_create - Create an iommufd_access
> + * @ictx: iommufd file descriptor
> + * @ioas_id: ID for a IOMMUFD_OBJ_IOAS
> + * @ops: Driver's ops to associate with the access
> + * @data: Opaque data to pass into ops functions
> + *
> + * An iommufd_access allows a driver to read/write to the IOAS without using
> + * DMA. The underlying CPU memory can be accessed using the
> + * iommufd_access_pin_pages() or iommufd_access_rw() functions.
> + *
> + * The provided ops are required to use iommufd_access_pin_pages().
> + */
> +struct iommufd_access *
> +iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
> +		      const struct iommufd_access_ops *ops, void *data)
> +{
> +	struct iommufd_access *access;
> +	struct iommufd_object *obj;
> +	int rc;
> +
> +	/*
> +	 * There is no uAPI for the access object, but to keep things symmetric
> +	 * use the object infrastructure anyhow.
> +	 */
> +	access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
> +	if (IS_ERR(access))
> +		return access;
> +
> +	access->data = data;
> +	access->ops = ops;
> +
> +	obj = iommufd_get_object(ictx, ioas_id, IOMMUFD_OBJ_IOAS);
> +	if (IS_ERR(obj)) {
> +		rc = PTR_ERR(obj);
> +		goto out_abort;
> +	}
> +	access->ioas = container_of(obj, struct iommufd_ioas, obj);
> +	iommufd_ref_to_users(obj);
> +
> +	if (ops->needs_pin_pages)
> +		access->iova_alignment = PAGE_SIZE;
> +	else
> +		access->iova_alignment = 1;
> +	rc = iopt_add_access(&access->ioas->iopt, access);
> +	if (rc)
> +		goto out_put_ioas;
> +
> +	/* The calling driver is a user until iommufd_access_destroy() */
> +	refcount_inc(&access->obj.users);
> +	access->ictx = ictx;
> +	iommufd_ctx_get(ictx);
> +	iommufd_object_finalize(ictx, &access->obj);
> +	return access;
> +out_put_ioas:
> +	refcount_dec(&access->ioas->obj.users);
> +out_abort:
> +	iommufd_object_abort(ictx, &access->obj);
> +	return ERR_PTR(rc);
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
> +
> +/**
> + * iommufd_access_destroy - Destroy an iommufd_access
> + * @access: The access to destroy
> + *
> + * The caller must stop using the access before destroying it.
> + */
> +void iommufd_access_destroy(struct iommufd_access *access)
> +{
> +	bool was_destroyed;
> +
> +	was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
> +	WARN_ON(!was_destroyed);
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
> +
> +/**
> + * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
> + * @iopt: iopt to work on
> + * @iova: Starting iova in the iopt
> + * @length: Number of bytes
> + *
> + * After this function returns there should be no users attached to the pages
> + * linked to this iopt that intersect with iova,length. Anyone that has attached
> + * a user through iopt_access_pages() needs to detatch it through
detach
> + * iommufd_access_unpin_pages() before this function returns.
> + *
> + * The unmap callback may not call or wait for a iommufd_access_destroy() to
> + * complete. Once iommufd_access_destroy() returns no ops are running and no
> + * future ops will be called.
I don't understand the above sentence. Is that related to the

+		if (!iommufd_lock_obj(&access->obj))
+			continue;

where is the unmap() called in that case?

> + */
> +void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
> +				 unsigned long length)
> +{
> +	struct iommufd_ioas *ioas =
> +		container_of(iopt, struct iommufd_ioas, iopt);
> +	struct iommufd_access *access;
> +	unsigned long index;
> +
> +	xa_lock(&ioas->iopt.access_list);
> +	xa_for_each(&ioas->iopt.access_list, index, access) {
> +		if (!iommufd_lock_obj(&access->obj))
> +			continue;
> +		xa_unlock(&ioas->iopt.access_list);
> +
> +		access->ops->unmap(access->data, iova, length);
> +
> +		iommufd_put_object(&access->obj);
> +		xa_lock(&ioas->iopt.access_list);
> +	}
> +	xa_unlock(&ioas->iopt.access_list);
> +}
> +
> +/**
> + * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
> + * @access: IOAS access to act on
> + * @iova: Starting IOVA
> + * @length:- Number of bytes to access
s/-//
> + *
> + * Return the struct page's. The caller must stop accessing them before calling
> + * this. The iova/length must exactly match the one provided to access_pages.
> + */
> +void iommufd_access_unpin_pages(struct iommufd_access *access,
> +				unsigned long iova, unsigned long length)
> +{
> +	struct io_pagetable *iopt = &access->ioas->iopt;
> +	struct iopt_area_contig_iter iter;
> +	unsigned long last_iova;
> +	struct iopt_area *area;
> +
> +	if (WARN_ON(!length) ||
> +	    WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
> +		return;
> +
> +	down_read(&iopt->iova_rwsem);
> +	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
> +		iopt_area_remove_access(
> +			area, iopt_area_iova_to_index(area, iter.cur_iova),
> +			iopt_area_iova_to_index(
> +				area,
> +				min(last_iova, iopt_area_last_iova(area))));
> +	up_read(&iopt->iova_rwsem);
> +	WARN_ON(!iopt_area_contig_done(&iter));
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
> +
> +static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
> +{
> +	if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
> +		return false;
> +
> +	if (!iopt_area_contig_done(iter) &&
> +	    (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
> +	     PAGE_SIZE) != (PAGE_SIZE - 1))
> +		return false;
> +	return true;
> +}
> +
> +static bool check_area_prot(struct iopt_area *area, unsigned int flags)
> +{
> +	if (flags & IOMMUFD_ACCESS_RW_WRITE)
> +		return area->iommu_prot & IOMMU_WRITE;
> +	return area->iommu_prot & IOMMU_READ;
> +}
> +
> +/**
> + * iommufd_access_pin_pages() - Return a list of pages under the iova
> + * @access: IOAS access to act on
> + * @iova: Starting IOVA
> + * @length: Number of bytes to access
> + * @out_pages: Output page list
> + * @flags: IOPMMUFD_ACCESS_RW_* flags
> + *
> + * Reads @length bytes starting at iova and returns the struct page * pointers.
> + * These can be kmap'd by the caller for CPU access.
> + *
> + * The caller must perform iopt_unaccess_pages() when done to balance this.
this function does not exist
> + *
> + * This API always requires a page aligned iova. This happens naturally if the
> + * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
> + * smaller alignments have corner cases where this API can fail on otherwise
> + * aligned iova.
> + */
> +int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
> +			     unsigned long length, struct page **out_pages,
> +			     unsigned int flags)
> +{
> +	struct io_pagetable *iopt = &access->ioas->iopt;
> +	struct iopt_area_contig_iter iter;
> +	unsigned long last_iova;
> +	struct iopt_area *area;
> +	int rc;
> +
> +	if (!length)
> +		return -EINVAL;
> +	if (check_add_overflow(iova, length - 1, &last_iova))
> +		return -EOVERFLOW;
> +
> +	down_read(&iopt->iova_rwsem);
> +	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
> +		unsigned long last = min(last_iova, iopt_area_last_iova(area));
> +		unsigned long last_index = iopt_area_iova_to_index(area, last);
> +		unsigned long index =
> +			iopt_area_iova_to_index(area, iter.cur_iova);
> +
> +		if (area->prevent_access ||
> +		    !iopt_area_contig_is_aligned(&iter)) {
> +			rc = -EINVAL;
> +			goto err_remove;
> +		}
> +
> +		if (!check_area_prot(area, flags)) {
> +			rc = -EPERM;
> +			goto err_remove;
> +		}
> +
> +		rc = iopt_area_add_access(area, index, last_index, out_pages,
> +					  flags);
> +		if (rc)
> +			goto err_remove;
> +		out_pages += last_index - index + 1;
> +	}
> +	if (!iopt_area_contig_done(&iter)) {
> +		rc = -ENOENT;
> +		goto err_remove;
> +	}
> +
> +	up_read(&iopt->iova_rwsem);
> +	return 0;
> +
> +err_remove:
> +	if (iova < iter.cur_iova) {
> +		last_iova = iter.cur_iova - 1;
> +		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
> +			iopt_area_remove_access(
> +				area,
> +				iopt_area_iova_to_index(area, iter.cur_iova),
> +				iopt_area_iova_to_index(
> +					area, min(last_iova,
> +						  iopt_area_last_iova(area))));
> +	}
> +	up_read(&iopt->iova_rwsem);
> +	return rc;
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
> +
> +/**
> + * iommufd_access_rw - Read or write data under the iova
> + * @access: IOAS access to act on
> + * @iova: Starting IOVA
> + * @data: Kernel buffer to copy to/from
> + * @length: Number of bytes to access
> + * @flags: IOMMUFD_ACCESS_RW_* flags
> + *
> + * Copy kernel to/from data into the range given by IOVA/length. If flags
> + * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
> + * by changing it into copy_to/from_user().
> + */
> +int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
> +		      void *data, size_t length, unsigned int flags)
> +{
> +	struct io_pagetable *iopt = &access->ioas->iopt;
> +	struct iopt_area_contig_iter iter;
> +	struct iopt_area *area;
> +	unsigned long last_iova;
> +	int rc;
> +
> +	if (!length)
> +		return -EINVAL;
> +	if (check_add_overflow(iova, length - 1, &last_iova))
> +		return -EOVERFLOW;
> +
> +	down_read(&iopt->iova_rwsem);
> +	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
> +		unsigned long last = min(last_iova, iopt_area_last_iova(area));
> +		unsigned long bytes = (last - iter.cur_iova) + 1;
> +
> +		if (area->prevent_access) {
> +			rc = -EINVAL;
> +			goto err_out;
> +		}
> +
> +		if (!check_area_prot(area, flags)) {
> +			rc = -EPERM;
> +			goto err_out;
> +		}
> +
> +		rc = iopt_pages_rw_access(
> +			area->pages, iopt_area_start_byte(area, iter.cur_iova),
> +			data, bytes, flags);
> +		if (rc)
> +			goto err_out;
> +		data += bytes;
> +	}
> +	if (!iopt_area_contig_done(&iter))
> +		rc = -ENOENT;
> +err_out:
> +	up_read(&iopt->iova_rwsem);
> +	return rc;
> +}
> +EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
> diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
> index 756d347948f0ec..4f4a9d9aac570e 100644
> --- a/drivers/iommu/iommufd/io_pagetable.c
> +++ b/drivers/iommu/iommufd/io_pagetable.c
> @@ -458,6 +458,7 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
>  	 * is NULL. This prevents domain attach/detatch from running
>  	 * concurrently with cleaning up the area.
>  	 */
> +again:
>  	down_read(&iopt->domains_rwsem);
>  	down_write(&iopt->iova_rwsem);
>  	while ((area = iopt_area_iter_first(iopt, start, last))) {
> @@ -486,8 +487,11 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
>  			area->prevent_access = true;
>  			up_write(&iopt->iova_rwsem);
>  			up_read(&iopt->domains_rwsem);
> -			/* Later patch calls back to drivers to unmap */
> -			return -EBUSY;
> +			iommufd_access_notify_unmap(iopt, area_first,
> +						    iopt_area_length(area));
> +			if (WARN_ON(READ_ONCE(area->num_accesses)))
> +				return -EDEADLOCK;
> +			goto again;
>  		}
>  
>  		pages = area->pages;
> diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
> index 73345886d969e5..e1653b2276dac9 100644
> --- a/drivers/iommu/iommufd/iommufd_private.h
> +++ b/drivers/iommu/iommufd/iommufd_private.h
> @@ -65,6 +65,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
>  		    unsigned long length, unsigned long *unmapped);
>  int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
>  
> +void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
> +				 unsigned long length);
>  int iopt_table_add_domain(struct io_pagetable *iopt,
>  			  struct iommu_domain *domain);
>  void iopt_table_remove_domain(struct io_pagetable *iopt,
> @@ -106,6 +108,7 @@ enum iommufd_object_type {
>  	IOMMUFD_OBJ_DEVICE,
>  	IOMMUFD_OBJ_HW_PAGETABLE,
>  	IOMMUFD_OBJ_IOAS,
> +	IOMMUFD_OBJ_ACCESS,
>  };
>  
>  /* Base struct for all objects with a userspace ID handle. */
> @@ -246,6 +249,11 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
>  void iommufd_device_destroy(struct iommufd_object *obj);
>  
>  struct iommufd_access {
> +	struct iommufd_object obj;
> +	struct iommufd_ctx *ictx;
> +	struct iommufd_ioas *ioas;
> +	const struct iommufd_access_ops *ops;
> +	void *data;
>  	unsigned long iova_alignment;
>  	u32 iopt_access_list_id;
>  };
> @@ -253,4 +261,6 @@ struct iommufd_access {
>  int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
>  void iopt_remove_access(struct io_pagetable *iopt,
>  			struct iommufd_access *access);
> +void iommufd_access_destroy_object(struct iommufd_object *obj);
> +
>  #endif
> diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
> index 8a114ddbdfcde2..c8cc0953dea13a 100644
> --- a/drivers/iommu/iommufd/main.c
> +++ b/drivers/iommu/iommufd/main.c
> @@ -352,6 +352,9 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
>  EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
>  
>  static const struct iommufd_object_ops iommufd_object_ops[] = {
> +	[IOMMUFD_OBJ_ACCESS] = {
> +		.destroy = iommufd_access_destroy_object,
> +	},
>  	[IOMMUFD_OBJ_DEVICE] = {
>  		.destroy = iommufd_device_destroy,
>  	},
> diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
> index 31efacd8a46cce..fb9a4c275cca86 100644
> --- a/include/linux/iommufd.h
> +++ b/include/linux/iommufd.h
> @@ -9,10 +9,12 @@
>  #include <linux/types.h>
>  #include <linux/errno.h>
>  #include <linux/err.h>
> -#include <linux/device.h>
>  
> +struct device;
>  struct iommufd_device;
> +struct page;
>  struct iommufd_ctx;
> +struct iommufd_access;
>  struct file;
>  
>  struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
> @@ -26,6 +28,11 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id,
>  			  unsigned int flags);
>  void iommufd_device_detach(struct iommufd_device *idev);
>  
> +struct iommufd_access_ops {
> +	u8 needs_pin_pages : 1;
> +	void (*unmap)(void *data, unsigned long iova, unsigned long length);
> +};
> +
>  enum {
>  	IOMMUFD_ACCESS_RW_READ = 0,
>  	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
> @@ -33,11 +40,24 @@ enum {
>  	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
>  };
>  
> +struct iommufd_access *
> +iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
> +		      const struct iommufd_access_ops *ops, void *data);
> +void iommufd_access_destroy(struct iommufd_access *access);
> +
>  void iommufd_ctx_get(struct iommufd_ctx *ictx);
>  
>  #if IS_ENABLED(CONFIG_IOMMUFD)
>  struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
>  void iommufd_ctx_put(struct iommufd_ctx *ictx);
> +
> +int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
> +			     unsigned long length, struct page **out_pages,
> +			     unsigned int flags);
> +void iommufd_access_unpin_pages(struct iommufd_access *access,
> +				unsigned long iova, unsigned long length);
> +int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
> +		      void *data, size_t len, unsigned int flags);
>  #else /* !CONFIG_IOMMUFD */
>  static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
>  {
> @@ -47,5 +67,26 @@ static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
>  static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
>  {
>  }
> +
> +static inline int iommufd_access_pin_pages(struct iommufd_access *access,
> +					   unsigned long iova,
> +					   unsigned long length,
> +					   struct page **out_pages,
> +					   unsigned int flags)
> +{
> +	return -EOPNOTSUPP;
> +}
> +
> +static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
> +					      unsigned long iova,
> +					      unsigned long length)
> +{
> +}
> +
> +static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
> +		      void *data, size_t len, unsigned int flags)
> +{
> +	return -EOPNOTSUPP;
> +}
>  #endif /* CONFIG_IOMMUFD */
>  #endif
Eric
Jason Gunthorpe Nov. 28, 2022, 6:56 p.m. UTC | #2
On Mon, Nov 28, 2022 at 04:48:58PM +0100, Eric Auger wrote:
> > +/**
> > + * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
> > + * @iopt: iopt to work on
> > + * @iova: Starting iova in the iopt
> > + * @length: Number of bytes
> > + *
> > + * After this function returns there should be no users attached to the pages
> > + * linked to this iopt that intersect with iova,length. Anyone that has attached
> > + * a user through iopt_access_pages() needs to detatch it through
> detach
> > + * iommufd_access_unpin_pages() before this function returns.
> > + *
> > + * The unmap callback may not call or wait for a iommufd_access_destroy() to
> > + * complete. Once iommufd_access_destroy() returns no ops are running and no
> > + * future ops will be called.
> I don't understand the above sentence. Is that related to the
> 
> +		if (!iommufd_lock_obj(&access->obj))
> +			continue;
> 
> where is the unmap() called in that case?

It is basically saying a driver cannot write this:

unmap():
  mutex_lock(lock)
   iommufd_access_unpin_pages(access)
  mutex_unlock(lock)

driver_close
  mutex_lock(lock)
   iommufd_access_destroy(access)
  mutex_unlock(lock)

Or any other equivalent thing. How about

 * iommufd_access_destroy() will wait for any outstanding unmap callback to
 * complete. Once iommufd_access_destroy() returns, no unmap ops are running
 * or will run in the future. Due to this a driver must not create locking
 * that prevents unmap from completing while iommufd_access_destroy() is
 * running.
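
In code terms (hypothetical my_* names, not from the patch), the rule
forbids the pattern shown first below and requires the second:

#include <linux/iommufd.h>
#include <linux/mutex.h>

struct my_driver {
	struct mutex lock;
	struct iommufd_access *access;
};

/* The unmap callback takes the driver lock to find and drop its pins */
static void my_unmap(void *data, unsigned long iova, unsigned long length)
{
	struct my_driver *md = data;

	mutex_lock(&md->lock);
	iommufd_access_unpin_pages(md->access, iova, length);
	mutex_unlock(&md->lock);
}

static void my_driver_close(struct my_driver *md)
{
	/*
	 * WRONG: iommufd_access_destroy() waits for any running my_unmap()
	 * to finish, while my_unmap() may be waiting for md->lock:
	 *
	 *	mutex_lock(&md->lock);
	 *	iommufd_access_destroy(md->access);
	 *	mutex_unlock(&md->lock);
	 */

	/* OK: destroy without holding any lock the unmap callback takes */
	iommufd_access_destroy(md->access);
}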

And I should really add a lockdep map here, which I will add as a
followup patch:

diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index de1babd56af156..d2b8e33ffaa0d7 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -5,6 +5,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/irqdomain.h>
+#include <linux/lockdep.h>
 
 #include "io_pagetable.h"
 #include "iommufd_private.h"
@@ -501,6 +502,15 @@ void iommufd_access_destroy(struct iommufd_access *access)
 {
 	bool was_destroyed;
 
+	/*
+	 * Alert lockdep that this cannot become entangled with an unmap
+	 * callback, or we will have deadlock.
+	 */
+#ifdef CONFIG_LOCKDEP
+	lock_acquire_exclusive(&access->ioas->iopt.unmap_map, 0, 0, NULL, _RET_IP_);
+	lock_release(&access->ioas->iopt.unmap_map, _RET_IP_);
+#endif
+
 	was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
 	WARN_ON(!was_destroyed);
 }
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 3467cea795684c..d858cc7f241fd0 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -460,6 +460,9 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 	unsigned long unmapped_bytes = 0;
 	int rc = -ENOENT;
 
+#ifdef CONFIG_LOCKDEP
+	lock_acquire(&iopt->unmap_map, 0, 0, NULL, _RET_IP_);
+#endif
 	/*
 	 * The domains_rwsem must be held in read mode any time any area->pages
 	 * is NULL. This prevents domain attach/detatch from running
@@ -521,6 +524,10 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 	up_read(&iopt->domains_rwsem);
 	if (unmapped)
 		*unmapped = unmapped_bytes;
+
+#ifdef CONFIG_LOCKDEP
+	lock_release(&iopt->unmap_map, _RET_IP_);
+#endif
 	return rc;
 }
 
@@ -643,6 +650,14 @@ void iopt_init_table(struct io_pagetable *iopt)
 	 * restriction.
 	 */
 	iopt->iova_alignment = 1;
+
+#ifdef CONFIG_LOCKDEP
+	{
+		static struct lock_class_key key;
+
+		lockdep_init_map(&iopt->unmap_map, "access_unmap", &key, 0);
+	}
+#endif
 }
 
 void iopt_destroy_table(struct io_pagetable *iopt)
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 222e86591f8ac9..8fb8e53ee0d3d3 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -45,6 +45,10 @@ struct io_pagetable {
 	struct rb_root_cached reserved_itree;
 	u8 disable_large_pages;
 	unsigned long iova_alignment;
+
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map unmap_map;
+#endif
 };
 
 void iopt_init_table(struct io_pagetable *iopt);

> > +/**
> > + * iommufd_access_pin_pages() - Return a list of pages under the iova
> > + * @access: IOAS access to act on
> > + * @iova: Starting IOVA
> > + * @length: Number of bytes to access
> > + * @out_pages: Output page list
> > + * @flags: IOPMMUFD_ACCESS_RW_* flags
> > + *
> > + * Reads @length bytes starting at iova and returns the struct page * pointers.
> > + * These can be kmap'd by the caller for CPU access.
> > + *
> > + * The caller must perform iopt_unaccess_pages() when done to balance this.
> this function does not exist

iommufd_access_unpin_pages()

Thanks,
Jason
Jason Gunthorpe Dec. 6, 2022, 8:40 p.m. UTC | #3
On Mon, Nov 28, 2022 at 02:56:17PM -0400, Jason Gunthorpe wrote:
> It is basically saying a driver cannot write this:
> 
> unmap():
>   mutex_lock(lock)
>    iommufd_access_unpin_pages(access)
>   mutex_unlock(lock)
> 
> driver_close
>   mutex_lock(lock)
>    iommufd_access_destroy(access)
>   mutex_unlock(lock)
> 
> Or any other equivalent thing. How about
> 
>  * iommufd_access_destroy() will wait for any outstanding unmap callback to
>  * complete. Once iommufd_access_destroy() returns, no unmap ops are running
>  * or will run in the future. Due to this a driver must not create locking
>  * that prevents unmap from completing while iommufd_access_destroy() is
>  * running.
> 
> And I should really add a lockdep map here, which I will add as a
> followup patch:

Actually, it turns out we already have enough real locks that lockdep
warns on this anyhow:

[  186.281328] ======================================================
[  186.281647] WARNING: possible circular locking dependency detected
[  186.281930] 6.1.0-rc7+ #156 Not tainted
[  186.282104] ------------------------------------------------------
[  186.282394] iommufd/404 is trying to acquire lock:
[  186.282622] ffff888006e57278 (&staccess->lock){+.+.}-{3:3}, at: iommufd_test_access_unmap+0x2a/0x170 [iommufd]
[  186.283211] 
[  186.283211] but task is already holding lock:
[  186.283498] ffff888008059a70 (&obj->destroy_rwsem){++++}-{3:3}, at: iommufd_access_notify_unmap+0xb7/0x240 [iommufd]
[  186.284000] 
[  186.284000] which lock already depends on the new lock.
[  186.284000] 
[  186.284496] 
[  186.284496] the existing dependency chain (in reverse order) is:
[  186.284905] 
[  186.284905] -> #1 (&obj->destroy_rwsem){++++}-{3:3}:
[  186.285234]        down_write+0x34/0x50
[  186.285438]        iommufd_object_destroy_user+0x1b/0x120 [iommufd]
[  186.285771]        iommufd_access_destroy+0x80/0xa0 [iommufd]
[  186.286111]        iommufd_test_staccess_release+0x5a/0x80 [iommufd]
[  186.286454]        __fput+0x1f9/0x3f0
[  186.286650]        ____fput+0x9/0x10
[  186.286834]        task_work_run+0xf4/0x150
[  186.287026]        exit_to_user_mode_loop+0xd0/0xf0
[  186.287271]        exit_to_user_mode_prepare+0x7f/0xc0
[  186.287519]        syscall_exit_to_user_mode+0x4d/0x1e0
[  186.287768]        do_syscall_64+0x50/0x90
[  186.287958]        entry_SYSCALL_64_after_hwframe+0x46/0xb0
[  186.288206] 
[  186.288206] -> #0 (&staccess->lock){+.+.}-{3:3}:
[  186.288598]        __lock_acquire+0x2092/0x3c80
[  186.288837]        lock_acquire+0x1b5/0x300
[  186.289037]        __mutex_lock_common+0xf7/0x1410
[  186.289299]        mutex_lock_nested+0x1b/0x30
[  186.289561]        iommufd_test_access_unmap+0x2a/0x170 [iommufd]
[  186.289892]        iommufd_access_notify_unmap+0x196/0x240 [iommufd]
[  186.290259]        iopt_unmap_iova_range+0x2c2/0x350 [iommufd]
[  186.290604]        iopt_unmap_iova+0x1b/0x30 [iommufd]
[  186.290889]        iommufd_ioas_unmap+0xdc/0x1d0 [iommufd]
[  186.291170]        iommufd_fops_ioctl+0x1e7/0x210 [iommufd]
[  186.291450]        __x64_sys_ioctl+0x11bb/0x1260
[  186.291707]        do_syscall_64+0x44/0x90
[  186.291903]        entry_SYSCALL_64_after_hwframe+0x46/0xb0

e.g. the splat above was provoked by deliberately mis-locking around
iommufd_access_destroy().

Jason

Patch

diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index a71f5740773f84..522469ae7b5770 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -6,6 +6,7 @@ 
 #include <linux/iommu.h>
 #include <linux/irqdomain.h>
 
+#include "io_pagetable.h"
 #include "iommufd_private.h"
 
 /*
@@ -415,3 +416,316 @@  void iommufd_device_detach(struct iommufd_device *idev)
 	refcount_dec(&idev->obj.users);
 }
 EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
+
+void iommufd_access_destroy_object(struct iommufd_object *obj)
+{
+	struct iommufd_access *access =
+		container_of(obj, struct iommufd_access, obj);
+
+	iopt_remove_access(&access->ioas->iopt, access);
+	iommufd_ctx_put(access->ictx);
+	refcount_dec(&access->ioas->obj.users);
+}
+
+/**
+ * iommufd_access_create - Create an iommufd_access
+ * @ictx: iommufd file descriptor
+ * @ioas_id: ID for a IOMMUFD_OBJ_IOAS
+ * @ops: Driver's ops to associate with the access
+ * @data: Opaque data to pass into ops functions
+ *
+ * An iommufd_access allows a driver to read/write to the IOAS without using
+ * DMA. The underlying CPU memory can be accessed using the
+ * iommufd_access_pin_pages() or iommufd_access_rw() functions.
+ *
+ * The provided ops are required to use iommufd_access_pin_pages().
+ */
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
+		      const struct iommufd_access_ops *ops, void *data)
+{
+	struct iommufd_access *access;
+	struct iommufd_object *obj;
+	int rc;
+
+	/*
+	 * There is no uAPI for the access object, but to keep things symmetric
+	 * use the object infrastructure anyhow.
+	 */
+	access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
+	if (IS_ERR(access))
+		return access;
+
+	access->data = data;
+	access->ops = ops;
+
+	obj = iommufd_get_object(ictx, ioas_id, IOMMUFD_OBJ_IOAS);
+	if (IS_ERR(obj)) {
+		rc = PTR_ERR(obj);
+		goto out_abort;
+	}
+	access->ioas = container_of(obj, struct iommufd_ioas, obj);
+	iommufd_ref_to_users(obj);
+
+	if (ops->needs_pin_pages)
+		access->iova_alignment = PAGE_SIZE;
+	else
+		access->iova_alignment = 1;
+	rc = iopt_add_access(&access->ioas->iopt, access);
+	if (rc)
+		goto out_put_ioas;
+
+	/* The calling driver is a user until iommufd_access_destroy() */
+	refcount_inc(&access->obj.users);
+	access->ictx = ictx;
+	iommufd_ctx_get(ictx);
+	iommufd_object_finalize(ictx, &access->obj);
+	return access;
+out_put_ioas:
+	refcount_dec(&access->ioas->obj.users);
+out_abort:
+	iommufd_object_abort(ictx, &access->obj);
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
+
+/**
+ * iommufd_access_destroy - Destroy an iommufd_access
+ * @access: The access to destroy
+ *
+ * The caller must stop using the access before destroying it.
+ */
+void iommufd_access_destroy(struct iommufd_access *access)
+{
+	bool was_destroyed;
+
+	was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
+	WARN_ON(!was_destroyed);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+
+/**
+ * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
+ * @iopt: iopt to work on
+ * @iova: Starting iova in the iopt
+ * @length: Number of bytes
+ *
+ * After this function returns there should be no users attached to the pages
+ * linked to this iopt that intersect with iova,length. Anyone that has attached
+ * a user through iopt_access_pages() needs to detatch it through
+ * iommufd_access_unpin_pages() before this function returns.
+ *
+ * The unmap callback may not call or wait for a iommufd_access_destroy() to
+ * complete. Once iommufd_access_destroy() returns no ops are running and no
+ * future ops will be called.
+ */
+void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
+				 unsigned long length)
+{
+	struct iommufd_ioas *ioas =
+		container_of(iopt, struct iommufd_ioas, iopt);
+	struct iommufd_access *access;
+	unsigned long index;
+
+	xa_lock(&ioas->iopt.access_list);
+	xa_for_each(&ioas->iopt.access_list, index, access) {
+		if (!iommufd_lock_obj(&access->obj))
+			continue;
+		xa_unlock(&ioas->iopt.access_list);
+
+		access->ops->unmap(access->data, iova, length);
+
+		iommufd_put_object(&access->obj);
+		xa_lock(&ioas->iopt.access_list);
+	}
+	xa_unlock(&ioas->iopt.access_list);
+}
+
+/**
+ * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length:- Number of bytes to access
+ *
+ * Return the struct page's. The caller must stop accessing them before calling
+ * this. The iova/length must exactly match the one provided to access_pages.
+ */
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+				unsigned long iova, unsigned long length)
+{
+	struct io_pagetable *iopt = &access->ioas->iopt;
+	struct iopt_area_contig_iter iter;
+	unsigned long last_iova;
+	struct iopt_area *area;
+
+	if (WARN_ON(!length) ||
+	    WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
+		return;
+
+	down_read(&iopt->iova_rwsem);
+	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
+		iopt_area_remove_access(
+			area, iopt_area_iova_to_index(area, iter.cur_iova),
+			iopt_area_iova_to_index(
+				area,
+				min(last_iova, iopt_area_last_iova(area))));
+	up_read(&iopt->iova_rwsem);
+	WARN_ON(!iopt_area_contig_done(&iter));
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
+
+static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
+{
+	if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
+		return false;
+
+	if (!iopt_area_contig_done(iter) &&
+	    (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
+	     PAGE_SIZE) != (PAGE_SIZE - 1))
+		return false;
+	return true;
+}
+
+static bool check_area_prot(struct iopt_area *area, unsigned int flags)
+{
+	if (flags & IOMMUFD_ACCESS_RW_WRITE)
+		return area->iommu_prot & IOMMU_WRITE;
+	return area->iommu_prot & IOMMU_READ;
+}
+
+/**
+ * iommufd_access_pin_pages() - Return a list of pages under the iova
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @length: Number of bytes to access
+ * @out_pages: Output page list
+ * @flags: IOPMMUFD_ACCESS_RW_* flags
+ *
+ * Reads @length bytes starting at iova and returns the struct page * pointers.
+ * These can be kmap'd by the caller for CPU access.
+ *
+ * The caller must perform iopt_unaccess_pages() when done to balance this.
+ *
+ * This API always requires a page aligned iova. This happens naturally if the
+ * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
+ * smaller alignments have corner cases where this API can fail on otherwise
+ * aligned iova.
+ */
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+			     unsigned long length, struct page **out_pages,
+			     unsigned int flags)
+{
+	struct io_pagetable *iopt = &access->ioas->iopt;
+	struct iopt_area_contig_iter iter;
+	unsigned long last_iova;
+	struct iopt_area *area;
+	int rc;
+
+	if (!length)
+		return -EINVAL;
+	if (check_add_overflow(iova, length - 1, &last_iova))
+		return -EOVERFLOW;
+
+	down_read(&iopt->iova_rwsem);
+	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
+		unsigned long last = min(last_iova, iopt_area_last_iova(area));
+		unsigned long last_index = iopt_area_iova_to_index(area, last);
+		unsigned long index =
+			iopt_area_iova_to_index(area, iter.cur_iova);
+
+		if (area->prevent_access ||
+		    !iopt_area_contig_is_aligned(&iter)) {
+			rc = -EINVAL;
+			goto err_remove;
+		}
+
+		if (!check_area_prot(area, flags)) {
+			rc = -EPERM;
+			goto err_remove;
+		}
+
+		rc = iopt_area_add_access(area, index, last_index, out_pages,
+					  flags);
+		if (rc)
+			goto err_remove;
+		out_pages += last_index - index + 1;
+	}
+	if (!iopt_area_contig_done(&iter)) {
+		rc = -ENOENT;
+		goto err_remove;
+	}
+
+	up_read(&iopt->iova_rwsem);
+	return 0;
+
+err_remove:
+	if (iova < iter.cur_iova) {
+		last_iova = iter.cur_iova - 1;
+		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
+			iopt_area_remove_access(
+				area,
+				iopt_area_iova_to_index(area, iter.cur_iova),
+				iopt_area_iova_to_index(
+					area, min(last_iova,
+						  iopt_area_last_iova(area))));
+	}
+	up_read(&iopt->iova_rwsem);
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
+
+/**
+ * iommufd_access_rw - Read or write data under the iova
+ * @access: IOAS access to act on
+ * @iova: Starting IOVA
+ * @data: Kernel buffer to copy to/from
+ * @length: Number of bytes to access
+ * @flags: IOMMUFD_ACCESS_RW_* flags
+ *
+ * Copy kernel to/from data into the range given by IOVA/length. If flags
+ * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
+ * by changing it into copy_to/from_user().
+ */
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+		      void *data, size_t length, unsigned int flags)
+{
+	struct io_pagetable *iopt = &access->ioas->iopt;
+	struct iopt_area_contig_iter iter;
+	struct iopt_area *area;
+	unsigned long last_iova;
+	int rc;
+
+	if (!length)
+		return -EINVAL;
+	if (check_add_overflow(iova, length - 1, &last_iova))
+		return -EOVERFLOW;
+
+	down_read(&iopt->iova_rwsem);
+	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
+		unsigned long last = min(last_iova, iopt_area_last_iova(area));
+		unsigned long bytes = (last - iter.cur_iova) + 1;
+
+		if (area->prevent_access) {
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		if (!check_area_prot(area, flags)) {
+			rc = -EPERM;
+			goto err_out;
+		}
+
+		rc = iopt_pages_rw_access(
+			area->pages, iopt_area_start_byte(area, iter.cur_iova),
+			data, bytes, flags);
+		if (rc)
+			goto err_out;
+		data += bytes;
+	}
+	if (!iopt_area_contig_done(&iter))
+		rc = -ENOENT;
+err_out:
+	up_read(&iopt->iova_rwsem);
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 756d347948f0ec..4f4a9d9aac570e 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -458,6 +458,7 @@  static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 	 * is NULL. This prevents domain attach/detatch from running
 	 * concurrently with cleaning up the area.
 	 */
+again:
 	down_read(&iopt->domains_rwsem);
 	down_write(&iopt->iova_rwsem);
 	while ((area = iopt_area_iter_first(iopt, start, last))) {
@@ -486,8 +487,11 @@  static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
 			area->prevent_access = true;
 			up_write(&iopt->iova_rwsem);
 			up_read(&iopt->domains_rwsem);
-			/* Later patch calls back to drivers to unmap */
-			return -EBUSY;
+			iommufd_access_notify_unmap(iopt, area_first,
+						    iopt_area_length(area));
+			if (WARN_ON(READ_ONCE(area->num_accesses)))
+				return -EDEADLOCK;
+			goto again;
 		}
 
 		pages = area->pages;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 73345886d969e5..e1653b2276dac9 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -65,6 +65,8 @@  int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
 		    unsigned long length, unsigned long *unmapped);
 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
 
+void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
+				 unsigned long length);
 int iopt_table_add_domain(struct io_pagetable *iopt,
 			  struct iommu_domain *domain);
 void iopt_table_remove_domain(struct io_pagetable *iopt,
@@ -106,6 +108,7 @@  enum iommufd_object_type {
 	IOMMUFD_OBJ_DEVICE,
 	IOMMUFD_OBJ_HW_PAGETABLE,
 	IOMMUFD_OBJ_IOAS,
+	IOMMUFD_OBJ_ACCESS,
 };
 
 /* Base struct for all objects with a userspace ID handle. */
@@ -246,6 +249,11 @@  void iommufd_hw_pagetable_destroy(struct iommufd_object *obj);
 void iommufd_device_destroy(struct iommufd_object *obj);
 
 struct iommufd_access {
+	struct iommufd_object obj;
+	struct iommufd_ctx *ictx;
+	struct iommufd_ioas *ioas;
+	const struct iommufd_access_ops *ops;
+	void *data;
 	unsigned long iova_alignment;
 	u32 iopt_access_list_id;
 };
@@ -253,4 +261,6 @@  struct iommufd_access {
 int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
 void iopt_remove_access(struct io_pagetable *iopt,
 			struct iommufd_access *access);
+void iommufd_access_destroy_object(struct iommufd_object *obj);
+
 #endif
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 8a114ddbdfcde2..c8cc0953dea13a 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -352,6 +352,9 @@  void iommufd_ctx_put(struct iommufd_ctx *ictx)
 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
 
 static const struct iommufd_object_ops iommufd_object_ops[] = {
+	[IOMMUFD_OBJ_ACCESS] = {
+		.destroy = iommufd_access_destroy_object,
+	},
 	[IOMMUFD_OBJ_DEVICE] = {
 		.destroy = iommufd_device_destroy,
 	},
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 31efacd8a46cce..fb9a4c275cca86 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -9,10 +9,12 @@ 
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/err.h>
-#include <linux/device.h>
 
+struct device;
 struct iommufd_device;
+struct page;
 struct iommufd_ctx;
+struct iommufd_access;
 struct file;
 
 struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
@@ -26,6 +28,11 @@  int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id,
 			  unsigned int flags);
 void iommufd_device_detach(struct iommufd_device *idev);
 
+struct iommufd_access_ops {
+	u8 needs_pin_pages : 1;
+	void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
 enum {
 	IOMMUFD_ACCESS_RW_READ = 0,
 	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
@@ -33,11 +40,24 @@  enum {
 	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
 };
 
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id,
+		      const struct iommufd_access_ops *ops, void *data);
+void iommufd_access_destroy(struct iommufd_access *access);
+
 void iommufd_ctx_get(struct iommufd_ctx *ictx);
 
 #if IS_ENABLED(CONFIG_IOMMUFD)
 struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
 void iommufd_ctx_put(struct iommufd_ctx *ictx);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+			     unsigned long length, struct page **out_pages,
+			     unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+				unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+		      void *data, size_t len, unsigned int flags);
 #else /* !CONFIG_IOMMUFD */
 static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
 {
@@ -47,5 +67,26 @@  static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
 static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
 {
 }
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+					   unsigned long iova,
+					   unsigned long length,
+					   struct page **out_pages,
+					   unsigned int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+					      unsigned long iova,
+					      unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+		      void *data, size_t len, unsigned int flags)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* CONFIG_IOMMUFD */
 #endif