
[v5,5/7] iommu: Add virtio-iommu driver

Message ID 20181122193801.50510-6-jean-philippe.brucker@arm.com (mailing list archive)
State New, archived
Delegated to: Bjorn Helgaas
Series Add virtio-iommu driver

Commit Message

Jean-Philippe Brucker Nov. 22, 2018, 7:37 p.m. UTC
The virtio IOMMU is a para-virtualized device that allows the guest to
send IOMMU requests such as map/unmap over the virtio transport, without
emulating page tables. This implementation handles ATTACH, DETACH, MAP
and UNMAP requests.

The bulk of the code transforms calls coming from the IOMMU API into
corresponding virtio requests. Mappings are kept in an interval tree
instead of page tables.

Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
---
 MAINTAINERS                       |   7 +
 drivers/iommu/Kconfig             |  11 +
 drivers/iommu/Makefile            |   1 +
 drivers/iommu/virtio-iommu.c      | 916 ++++++++++++++++++++++++++++++
 include/uapi/linux/virtio_ids.h   |   1 +
 include/uapi/linux/virtio_iommu.h | 104 ++++
 6 files changed, 1040 insertions(+)
 create mode 100644 drivers/iommu/virtio-iommu.c
 create mode 100644 include/uapi/linux/virtio_iommu.h
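
To make the translation described above concrete, here is a standalone
sketch (not part of the patch) of what a single map() call ends up looking
like on the request queue. The structures are mirrored from the patch's
include/uapi/linux/virtio_iommu.h for illustration as plain userspace C,
and the domain ID, IOVA, physical address and size are made-up example
values.

/*
 * Illustration only: request structures mirrored from the patch's
 * include/uapi/linux/virtio_iommu.h, used here in a self-contained
 * userspace program. All numeric values are hypothetical.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct virtio_iommu_req_head { uint8_t type; uint8_t reserved[3]; };
struct virtio_iommu_req_tail { uint8_t status; uint8_t reserved[3]; };

struct virtio_iommu_req_map {
	struct virtio_iommu_req_head	head;
	uint32_t			domain;		/* __le32 on the wire */
	uint64_t			virt_start;	/* __le64 on the wire */
	uint64_t			virt_end;
	uint64_t			phys_start;
	uint32_t			flags;
	struct virtio_iommu_req_tail	tail;
};

#define VIRTIO_IOMMU_T_MAP		0x03
#define VIRTIO_IOMMU_MAP_F_READ		(1 << 0)
#define VIRTIO_IOMMU_MAP_F_WRITE	(1 << 1)

int main(void)
{
	/* Roughly what viommu_map() builds for a 4 KiB read/write mapping
	 * of IOVA 0x10000000 to PA 0x80000000 in domain 1. */
	struct virtio_iommu_req_map map = {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= htole32(1),
		.virt_start	= htole64(0x10000000),
		.virt_end	= htole64(0x10000000 + 0x1000 - 1),
		.phys_start	= htole64(0x80000000),
		.flags		= htole32(VIRTIO_IOMMU_MAP_F_READ |
					  VIRTIO_IOMMU_MAP_F_WRITE),
	};

	/*
	 * The driver splits the buffer: everything before the tail goes in
	 * a device-readable descriptor and the tail in a device-writable
	 * one, so the device can return the status in tail.status.
	 */
	size_t write_offset = sizeof(map) -
			      sizeof(struct virtio_iommu_req_tail);

	printf("MAP for domain %u: %zu bytes, %zu device-readable, %zu-byte tail\n",
	       (unsigned)le32toh(map.domain), sizeof(map), write_offset,
	       sizeof(struct virtio_iommu_req_tail));
	return 0;
}

In the patch, viommu_map() fills in the same structure, viommu_add_mapping()
records the interval in the domain's tree, and __viommu_add_req() queues the
two-part buffer; __viommu_sync_req() later copies the device-written tail
back so that viommu_get_req_errno() can convert the status into an errno.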

Comments

Eric Auger Nov. 23, 2018, 8:27 a.m. UTC | #1
Hi Jean,

On 11/22/18 8:37 PM, Jean-Philippe Brucker wrote:
> The virtio IOMMU is a para-virtualized device that allows the guest to
> send IOMMU requests such as map/unmap over the virtio transport, without
> emulating page tables. This implementation handles ATTACH, DETACH, MAP
> and UNMAP requests.
> 
> The bulk of the code transforms calls coming from the IOMMU API into
> corresponding virtio requests. Mappings are kept in an interval tree
> instead of page tables.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>

Thanks

Eric
> ---
>  MAINTAINERS                       |   7 +
>  drivers/iommu/Kconfig             |  11 +
>  drivers/iommu/Makefile            |   1 +
>  drivers/iommu/virtio-iommu.c      | 916 ++++++++++++++++++++++++++++++
>  include/uapi/linux/virtio_ids.h   |   1 +
>  include/uapi/linux/virtio_iommu.h | 104 ++++
>  6 files changed, 1040 insertions(+)
>  create mode 100644 drivers/iommu/virtio-iommu.c
>  create mode 100644 include/uapi/linux/virtio_iommu.h
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 1689dcfec800..3d8550c76f4a 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -15946,6 +15946,13 @@ S:	Maintained
>  F:	drivers/virtio/virtio_input.c
>  F:	include/uapi/linux/virtio_input.h
>  
> +VIRTIO IOMMU DRIVER
> +M:	Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> +L:	virtualization@lists.linux-foundation.org
> +S:	Maintained
> +F:	drivers/iommu/virtio-iommu.c
> +F:	include/uapi/linux/virtio_iommu.h
> +
>  VIRTUAL BOX GUEST DEVICE DRIVER
>  M:	Hans de Goede <hdegoede@redhat.com>
>  M:	Arnd Bergmann <arnd@arndb.de>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bf2bbfa2a399..db5f2b8c23f5 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -464,4 +464,15 @@ config QCOM_IOMMU
>  	help
>  	  Support for IOMMU on certain Qualcomm SoCs.
>  
> +config VIRTIO_IOMMU
> +	bool "Virtio IOMMU driver"
> +	depends on VIRTIO=y
> +	select IOMMU_API
> +	select INTERVAL_TREE
> +	select ARM_DMA_USE_IOMMU if ARM
> +	help
> +	  Para-virtualised IOMMU driver with virtio.
> +
> +	  Say Y here if you intend to run this kernel as a guest.
> +
>  endif # IOMMU_SUPPORT
> diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> index 5481e5fe1f95..bd7e55751d09 100644
> --- a/drivers/iommu/Makefile
> +++ b/drivers/iommu/Makefile
> @@ -36,3 +36,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
>  obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
>  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
>  obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
> +obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> new file mode 100644
> index 000000000000..7540dab9c8dc
> --- /dev/null
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -0,0 +1,916 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Virtio driver for the paravirtualized IOMMU
> + *
> + * Copyright (C) 2018 Arm Limited
> + */
> +
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/amba/bus.h>
> +#include <linux/delay.h>
> +#include <linux/dma-iommu.h>
> +#include <linux/freezer.h>
> +#include <linux/interval_tree.h>
> +#include <linux/iommu.h>
> +#include <linux/module.h>
> +#include <linux/of_iommu.h>
> +#include <linux/of_platform.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/wait.h>
> +
> +#include <uapi/linux/virtio_iommu.h>
> +
> +#define MSI_IOVA_BASE			0x8000000
> +#define MSI_IOVA_LENGTH			0x100000
> +
> +#define VIOMMU_REQUEST_VQ		0
> +#define VIOMMU_NR_VQS			1
> +
> +struct viommu_dev {
> +	struct iommu_device		iommu;
> +	struct device			*dev;
> +	struct virtio_device		*vdev;
> +
> +	struct ida			domain_ids;
> +
> +	struct virtqueue		*vqs[VIOMMU_NR_VQS];
> +	spinlock_t			request_lock;
> +	struct list_head		requests;
> +
> +	/* Device configuration */
> +	struct iommu_domain_geometry	geometry;
> +	u64				pgsize_bitmap;
> +	u8				domain_bits;
> +};
> +
> +struct viommu_mapping {
> +	phys_addr_t			paddr;
> +	struct interval_tree_node	iova;
> +	u32				flags;
> +};
> +
> +struct viommu_domain {
> +	struct iommu_domain		domain;
> +	struct viommu_dev		*viommu;
> +	struct mutex			mutex; /* protects viommu pointer */
> +	unsigned int			id;
> +
> +	spinlock_t			mappings_lock;
> +	struct rb_root_cached		mappings;
> +
> +	unsigned long			nr_endpoints;
> +};
> +
> +struct viommu_endpoint {
> +	struct viommu_dev		*viommu;
> +	struct viommu_domain		*vdomain;
> +};
> +
> +struct viommu_request {
> +	struct list_head		list;
> +	void				*writeback;
> +	unsigned int			write_offset;
> +	unsigned int			len;
> +	char				buf[];
> +};
> +
> +#define to_viommu_domain(domain)	\
> +	container_of(domain, struct viommu_domain, domain)
> +
> +static int viommu_get_req_errno(void *buf, size_t len)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	switch (tail->status) {
> +	case VIRTIO_IOMMU_S_OK:
> +		return 0;
> +	case VIRTIO_IOMMU_S_UNSUPP:
> +		return -ENOSYS;
> +	case VIRTIO_IOMMU_S_INVAL:
> +		return -EINVAL;
> +	case VIRTIO_IOMMU_S_RANGE:
> +		return -ERANGE;
> +	case VIRTIO_IOMMU_S_NOENT:
> +		return -ENOENT;
> +	case VIRTIO_IOMMU_S_FAULT:
> +		return -EFAULT;
> +	case VIRTIO_IOMMU_S_IOERR:
> +	case VIRTIO_IOMMU_S_DEVERR:
> +	default:
> +		return -EIO;
> +	}
> +}
> +
> +static void viommu_set_req_status(void *buf, size_t len, int status)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	tail->status = status;
> +}
> +
> +static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
> +					  struct virtio_iommu_req_head *req,
> +					  size_t len)
> +{
> +	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
> +
> +	return len - tail_size;
> +}
> +
> +/*
> + * __viommu_sync_req - Complete all in-flight requests
> + *
> + * Wait for all added requests to complete. When this function returns, all
> + * requests that were in-flight at the time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret = 0;
> +	unsigned int len;
> +	size_t write_len;
> +	struct viommu_request *req;
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	virtqueue_kick(vq);
> +
> +	while (!list_empty(&viommu->requests)) {
> +		len = 0;
> +		req = virtqueue_get_buf(vq, &len);
> +		if (!req)
> +			continue;
> +
> +		if (!len)
> +			viommu_set_req_status(req->buf, req->len,
> +					      VIRTIO_IOMMU_S_IOERR);
> +
> +		write_len = req->len - req->write_offset;
> +		if (req->writeback && len == write_len)
> +			memcpy(req->writeback, req->buf + req->write_offset,
> +			       write_len);
> +
> +		list_del(&req->list);
> +		kfree(req);
> +	}
> +
> +	return ret;
> +}
> +
> +static int viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_sync_req(viommu);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * __viommu_add_req - Add one request to the queue
> + * @buf: pointer to the request buffer
> + * @len: length of the request buffer
> + * @writeback: copy data back to the buffer when the request completes.
> + *
> + * Add a request to the queue. Only synchronize the queue if it's already full.
> + * Otherwise don't kick the queue nor wait for requests to complete.
> + *
> + * When @writeback is true, data written by the device, including the request
> + * status, is copied into @buf after the request completes. This is unsafe if
> + * the caller allocates @buf on the stack and drops the lock between add_req() and
> + * sync_req().
> + *
> + * Return 0 if the request was successfully added to the queue.
> + */
> +static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
> +			    bool writeback)
> +{
> +	int ret;
> +	off_t write_offset;
> +	struct viommu_request *req;
> +	struct scatterlist top_sg, bottom_sg;
> +	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
> +	if (write_offset <= 0)
> +		return -EINVAL;
> +
> +	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	req->len = len;
> +	if (writeback) {
> +		req->writeback = buf + write_offset;
> +		req->write_offset = write_offset;
> +	}
> +	memcpy(&req->buf, buf, write_offset);
> +
> +	sg_init_one(&top_sg, req->buf, write_offset);
> +	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
> +
> +	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		/* If the queue is full, sync and retry */
> +		if (!__viommu_sync_req(viommu))
> +			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	}
> +	if (ret)
> +		goto err_free;
> +
> +	list_add_tail(&req->list, &viommu->requests);
> +	return 0;
> +
> +err_free:
> +	kfree(req);
> +	return ret;
> +}
> +
> +static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_add_req(viommu, buf, len, false);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * Send a request and wait for it to complete. Return the request status (as an
> + * errno)
> + */
> +static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
> +				size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +
> +	ret = __viommu_add_req(viommu, buf, len, true);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
> +		goto out_unlock;
> +	}
> +
> +	ret = __viommu_sync_req(viommu);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +		/* Fall-through (get the actual request status) */
> +	}
> +
> +	ret = viommu_get_req_errno(buf, len);
> +out_unlock:
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +	return ret;
> +}
> +
> +/*
> + * viommu_add_mapping - add a mapping to the internal tree
> + *
> + * On success, return 0. Otherwise return -ENOMEM.
> + */
> +static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
> +			      phys_addr_t paddr, size_t size, u32 flags)
> +{
> +	unsigned long irqflags;
> +	struct viommu_mapping *mapping;
> +
> +	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
> +	if (!mapping)
> +		return -ENOMEM;
> +
> +	mapping->paddr		= paddr;
> +	mapping->iova.start	= iova;
> +	mapping->iova.last	= iova + size - 1;
> +	mapping->flags		= flags;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> +	interval_tree_insert(&mapping->iova, &vdomain->mappings);
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
> +
> +	return 0;
> +}
> +
> +/*
> + * viommu_del_mappings - remove mappings from the internal tree
> + *
> + * @vdomain: the domain
> + * @iova: start of the range
> + * @size: size of the range. A size of 0 corresponds to the entire address
> + *	space.
> + *
> + * On success, returns the number of unmapped bytes (>= size)
> + */
> +static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> +				  unsigned long iova, size_t size)
> +{
> +	size_t unmapped = 0;
> +	unsigned long flags;
> +	unsigned long last = iova + size - 1;
> +	struct viommu_mapping *mapping = NULL;
> +	struct interval_tree_node *node, *next;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> +	while (next) {
> +		node = next;
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		next = interval_tree_iter_next(node, iova, last);
> +
> +		/* Trying to split a mapping? */
> +		if (mapping->iova.start < iova)
> +			break;
> +
> +		/*
> +		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
> +		 * with a single MAP request, so remove the full mapping.
> +		 */
> +		unmapped += mapping->iova.last - mapping->iova.start + 1;
> +
> +		interval_tree_remove(node, &vdomain->mappings);
> +		kfree(mapping);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return unmapped;
> +}
> +
> +/*
> + * viommu_replay_mappings - re-send MAP requests
> + *
> + * When reattaching a domain that was previously detached from all endpoints,
> + * mappings were deleted from the device. Re-create the mappings available in
> + * the internal tree.
> + */
> +static int viommu_replay_mappings(struct viommu_domain *vdomain)
> +{
> +	int ret = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct virtio_iommu_req_map map;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
> +	while (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		map = (struct virtio_iommu_req_map) {
> +			.head.type	= VIRTIO_IOMMU_T_MAP,
> +			.domain		= cpu_to_le32(vdomain->id),
> +			.virt_start	= cpu_to_le64(mapping->iova.start),
> +			.virt_end	= cpu_to_le64(mapping->iova.last),
> +			.phys_start	= cpu_to_le64(mapping->paddr),
> +			.flags		= cpu_to_le32(mapping->flags),
> +		};
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +		if (ret)
> +			break;
> +
> +		node = interval_tree_iter_next(node, 0, -1UL);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return ret;
> +}
> +
> +/* IOMMU API */
> +
> +static struct iommu_domain *viommu_domain_alloc(unsigned type)
> +{
> +	struct viommu_domain *vdomain;
> +
> +	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
> +		return NULL;
> +
> +	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> +	if (!vdomain)
> +		return NULL;
> +
> +	mutex_init(&vdomain->mutex);
> +	spin_lock_init(&vdomain->mappings_lock);
> +	vdomain->mappings = RB_ROOT_CACHED;
> +
> +	if (type == IOMMU_DOMAIN_DMA &&
> +	    iommu_get_dma_cookie(&vdomain->domain)) {
> +		kfree(vdomain);
> +		return NULL;
> +	}
> +
> +	return &vdomain->domain;
> +}
> +
> +static int viommu_domain_finalise(struct viommu_dev *viommu,
> +				  struct iommu_domain *domain)
> +{
> +	int ret;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
> +				  (1U << viommu->domain_bits) - 1;
> +
> +	vdomain->viommu		= viommu;
> +
> +	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
> +	domain->geometry	= viommu->geometry;
> +
> +	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
> +	if (ret >= 0)
> +		vdomain->id = (unsigned int)ret;
> +
> +	return ret > 0 ? 0 : ret;
> +}
> +
> +static void viommu_domain_free(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	iommu_put_dma_cookie(domain);
> +
> +	/* Free all remaining mappings (size 2^64) */
> +	viommu_del_mappings(vdomain, 0, 0);
> +
> +	if (vdomain->viommu)
> +		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> +
> +	kfree(vdomain);
> +}
> +
> +static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> +{
> +	int i;
> +	int ret = 0;
> +	struct virtio_iommu_req_attach req;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +	struct viommu_endpoint *vdev = fwspec->iommu_priv;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	mutex_lock(&vdomain->mutex);
> +	if (!vdomain->viommu) {
> +		/*
> +		 * Properly initialize the domain now that we know which viommu
> +		 * owns it.
> +		 */
> +		ret = viommu_domain_finalise(vdev->viommu, domain);
> +	} else if (vdomain->viommu != vdev->viommu) {
> +		dev_err(dev, "cannot attach to foreign vIOMMU\n");
> +		ret = -EXDEV;
> +	}
> +	mutex_unlock(&vdomain->mutex);
> +
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * In the virtio-iommu device, when attaching the endpoint to a new
> +	 * domain, it is detached from the old one and, if as a result the
> +	 * old domain isn't attached to any endpoint, all mappings are removed
> +	 * from the old domain and it is freed.
> +	 *
> +	 * In the driver the old domain still exists, and its mappings will be
> +	 * recreated if it gets reattached to an endpoint. Otherwise it will be
> +	 * freed explicitly.
> +	 *
> +	 * vdev->vdomain is protected by group->mutex
> +	 */
> +	if (vdev->vdomain)
> +		vdev->vdomain->nr_endpoints--;
> +
> +	req = (struct virtio_iommu_req_attach) {
> +		.head.type	= VIRTIO_IOMMU_T_ATTACH,
> +		.domain		= cpu_to_le32(vdomain->id),
> +	};
> +
> +	for (i = 0; i < fwspec->num_ids; i++) {
> +		req.endpoint = cpu_to_le32(fwspec->ids[i]);
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
> +		if (ret)
> +			return ret;
> +	}
> +
> +	if (!vdomain->nr_endpoints) {
> +		/*
> +		 * This endpoint is the first to be attached to the domain.
> +		 * Replay existing mappings (e.g. SW MSI).
> +		 */
> +		ret = viommu_replay_mappings(vdomain);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	vdomain->nr_endpoints++;
> +	vdev->vdomain = vdomain;
> +
> +	return 0;
> +}
> +
> +static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> +		      phys_addr_t paddr, size_t size, int prot)
> +{
> +	int ret;
> +	int flags;
> +	struct virtio_iommu_req_map map;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
> +		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
> +		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
> +
> +	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> +	if (ret)
> +		return ret;
> +
> +	map = (struct virtio_iommu_req_map) {
> +		.head.type	= VIRTIO_IOMMU_T_MAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.phys_start	= cpu_to_le64(paddr),
> +		.virt_end	= cpu_to_le64(iova + size - 1),
> +		.flags		= cpu_to_le32(flags),
> +	};
> +
> +	if (!vdomain->nr_endpoints)
> +		return 0;
> +
> +	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +	if (ret)
> +		viommu_del_mappings(vdomain, iova, size);
> +
> +	return ret;
> +}
> +
> +static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> +			   size_t size)
> +{
> +	int ret = 0;
> +	size_t unmapped;
> +	struct virtio_iommu_req_unmap unmap;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	unmapped = viommu_del_mappings(vdomain, iova, size);
> +	if (unmapped < size)
> +		return 0;
> +
> +	/* Device already removed all mappings after detach. */
> +	if (!vdomain->nr_endpoints)
> +		return unmapped;
> +
> +	unmap = (struct virtio_iommu_req_unmap) {
> +		.head.type	= VIRTIO_IOMMU_T_UNMAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.virt_end	= cpu_to_le64(iova + unmapped - 1),
> +	};
> +
> +	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
> +	return ret ? 0 : unmapped;
> +}
> +
> +static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
> +				       dma_addr_t iova)
> +{
> +	u64 paddr = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
> +	if (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		paddr = mapping->paddr + (iova - mapping->iova.start);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return paddr;
> +}
> +
> +static void viommu_iotlb_sync(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	viommu_sync_req(vdomain->viommu);
> +}
> +
> +static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *region;
> +	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> +
> +	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
> +					 IOMMU_RESV_SW_MSI);
> +	if (!region)
> +		return;
> +
> +	list_add_tail(&region->list, head);
> +	iommu_dma_get_resv_regions(dev, head);
> +}
> +
> +static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *entry, *next;
> +
> +	list_for_each_entry_safe(entry, next, head, list)
> +		kfree(entry);
> +}
> +
> +static struct iommu_ops viommu_ops;
> +static struct virtio_driver virtio_iommu_drv;
> +
> +static int viommu_match_node(struct device *dev, void *data)
> +{
> +	return dev->parent->fwnode == data;
> +}
> +
> +static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
> +{
> +	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
> +						fwnode, viommu_match_node);
> +	put_device(dev);
> +
> +	return dev ? dev_to_virtio(dev)->priv : NULL;
> +}
> +
> +static int viommu_add_device(struct device *dev)
> +{
> +	int ret;
> +	struct iommu_group *group;
> +	struct viommu_endpoint *vdev;
> +	struct viommu_dev *viommu = NULL;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return -ENODEV;
> +
> +	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
> +	if (!viommu)
> +		return -ENODEV;
> +
> +	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
> +	if (!vdev)
> +		return -ENOMEM;
> +
> +	vdev->viommu = viommu;
> +	fwspec->iommu_priv = vdev;
> +
> +	ret = iommu_device_link(&viommu->iommu, dev);
> +	if (ret)
> +		goto err_free_dev;
> +
> +	/*
> +	 * Last step creates a default domain and attaches to it. Everything
> +	 * must be ready.
> +	 */
> +	group = iommu_group_get_for_dev(dev);
> +	if (IS_ERR(group)) {
> +		ret = PTR_ERR(group);
> +		goto err_unlink_dev;
> +	}
> +
> +	iommu_group_put(group);
> +
> +	return PTR_ERR_OR_ZERO(group);
> +
> +err_unlink_dev:
> +	iommu_device_unlink(&viommu->iommu, dev);
> +err_free_dev:
> +	kfree(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove_device(struct device *dev)
> +{
> +	struct viommu_endpoint *vdev;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return;
> +
> +	vdev = fwspec->iommu_priv;
> +
> +	iommu_group_remove_device(dev);
> +	iommu_device_unlink(&vdev->viommu->iommu, dev);
> +	kfree(vdev);
> +}
> +
> +static struct iommu_group *viommu_device_group(struct device *dev)
> +{
> +	if (dev_is_pci(dev))
> +		return pci_device_group(dev);
> +	else
> +		return generic_device_group(dev);
> +}
> +
> +static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
> +{
> +	return iommu_fwspec_add_ids(dev, args->args, 1);
> +}
> +
> +static struct iommu_ops viommu_ops = {
> +	.domain_alloc		= viommu_domain_alloc,
> +	.domain_free		= viommu_domain_free,
> +	.attach_dev		= viommu_attach_dev,
> +	.map			= viommu_map,
> +	.unmap			= viommu_unmap,
> +	.iova_to_phys		= viommu_iova_to_phys,
> +	.iotlb_sync		= viommu_iotlb_sync,
> +	.add_device		= viommu_add_device,
> +	.remove_device		= viommu_remove_device,
> +	.device_group		= viommu_device_group,
> +	.get_resv_regions	= viommu_get_resv_regions,
> +	.put_resv_regions	= viommu_put_resv_regions,
> +	.of_xlate		= viommu_of_xlate,
> +};
> +
> +static int viommu_init_vqs(struct viommu_dev *viommu)
> +{
> +	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
> +	const char *name = "request";
> +	void *ret;
> +
> +	ret = virtio_find_single_vq(vdev, NULL, name);
> +	if (IS_ERR(ret)) {
> +		dev_err(viommu->dev, "cannot find VQ\n");
> +		return PTR_ERR(ret);
> +	}
> +
> +	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;
> +
> +	return 0;
> +}
> +
> +static int viommu_probe(struct virtio_device *vdev)
> +{
> +	struct device *parent_dev = vdev->dev.parent;
> +	struct viommu_dev *viommu = NULL;
> +	struct device *dev = &vdev->dev;
> +	u64 input_start = 0;
> +	u64 input_end = -1UL;
> +	int ret;
> +
> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> +		return -ENODEV;
> +
> +	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
> +	if (!viommu)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&viommu->request_lock);
> +	ida_init(&viommu->domain_ids);
> +	viommu->dev = dev;
> +	viommu->vdev = vdev;
> +	INIT_LIST_HEAD(&viommu->requests);
> +
> +	ret = viommu_init_vqs(viommu);
> +	if (ret)
> +		return ret;
> +
> +	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
> +		     &viommu->pgsize_bitmap);
> +
> +	if (!viommu->pgsize_bitmap) {
> +		ret = -EINVAL;
> +		goto err_free_vqs;
> +	}
> +
> +	viommu->domain_bits = 32;
> +
> +	/* Optional features */
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.start,
> +			     &input_start);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.end,
> +			     &input_end);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
> +			     struct virtio_iommu_config, domain_bits,
> +			     &viommu->domain_bits);
> +
> +	viommu->geometry = (struct iommu_domain_geometry) {
> +		.aperture_start	= input_start,
> +		.aperture_end	= input_end,
> +		.force_aperture	= true,
> +	};
> +
> +	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
> +
> +	virtio_device_ready(vdev);
> +
> +	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
> +				     virtio_bus_name(vdev));
> +	if (ret)
> +		goto err_free_vqs;
> +
> +	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
> +	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
> +
> +	iommu_device_register(&viommu->iommu);
> +
> +#ifdef CONFIG_PCI
> +	if (pci_bus_type.iommu_ops != &viommu_ops) {
> +		pci_request_acs();
> +		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +#ifdef CONFIG_ARM_AMBA
> +	if (amba_bustype.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +	if (platform_bus_type.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +
> +	vdev->priv = viommu;
> +
> +	dev_info(dev, "input address: %u bits\n",
> +		 order_base_2(viommu->geometry.aperture_end));
> +	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
> +
> +	return 0;
> +
> +err_unregister:
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +err_free_vqs:
> +	vdev->config->del_vqs(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove(struct virtio_device *vdev)
> +{
> +	struct viommu_dev *viommu = vdev->priv;
> +
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +
> +	/* Stop all virtqueues */
> +	vdev->config->reset(vdev);
> +	vdev->config->del_vqs(vdev);
> +
> +	dev_info(&vdev->dev, "device removed\n");
> +}
> +
> +static void viommu_config_changed(struct virtio_device *vdev)
> +{
> +	dev_warn(&vdev->dev, "config changed\n");
> +}
> +
> +static unsigned int features[] = {
> +	VIRTIO_IOMMU_F_MAP_UNMAP,
> +	VIRTIO_IOMMU_F_DOMAIN_BITS,
> +	VIRTIO_IOMMU_F_INPUT_RANGE,
> +};
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static struct virtio_driver virtio_iommu_drv = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.feature_table		= features,
> +	.feature_table_size	= ARRAY_SIZE(features),
> +	.probe			= viommu_probe,
> +	.remove			= viommu_remove,
> +	.config_changed		= viommu_config_changed,
> +};
> +
> +module_virtio_driver(virtio_iommu_drv);
> +
> +MODULE_DESCRIPTION("Virtio IOMMU driver");
> +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 6d5c3b2d4f4d..cfe47c5d9a56 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
>  #define VIRTIO_ID_INPUT        18 /* virtio input */
>  #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
>  #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
> +#define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
>  
>  #endif /* _LINUX_VIRTIO_IDS_H */
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> new file mode 100644
> index 000000000000..e7c05e3afa44
> --- /dev/null
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -0,0 +1,104 @@
> +/* SPDX-License-Identifier: BSD-3-Clause */
> +/*
> + * Virtio-iommu definition v0.9
> + *
> + * Copyright (C) 2018 Arm Ltd.
> + */
> +#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
> +#define _UAPI_LINUX_VIRTIO_IOMMU_H
> +
> +#include <linux/types.h>
> +
> +/* Feature bits */
> +#define VIRTIO_IOMMU_F_INPUT_RANGE		0
> +#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
> +#define VIRTIO_IOMMU_F_MAP_UNMAP		2
> +#define VIRTIO_IOMMU_F_BYPASS			3
> +
> +struct virtio_iommu_range {
> +	__u64					start;
> +	__u64					end;
> +};
> +
> +struct virtio_iommu_config {
> +	/* Supported page sizes */
> +	__u64					page_size_mask;
> +	/* Supported IOVA range */
> +	struct virtio_iommu_range		input_range;
> +	/* Max domain ID size */
> +	__u8					domain_bits;
> +	__u8					padding[3];
> +};
> +
> +/* Request types */
> +#define VIRTIO_IOMMU_T_ATTACH			0x01
> +#define VIRTIO_IOMMU_T_DETACH			0x02
> +#define VIRTIO_IOMMU_T_MAP			0x03
> +#define VIRTIO_IOMMU_T_UNMAP			0x04
> +
> +/* Status types */
> +#define VIRTIO_IOMMU_S_OK			0x00
> +#define VIRTIO_IOMMU_S_IOERR			0x01
> +#define VIRTIO_IOMMU_S_UNSUPP			0x02
> +#define VIRTIO_IOMMU_S_DEVERR			0x03
> +#define VIRTIO_IOMMU_S_INVAL			0x04
> +#define VIRTIO_IOMMU_S_RANGE			0x05
> +#define VIRTIO_IOMMU_S_NOENT			0x06
> +#define VIRTIO_IOMMU_S_FAULT			0x07
> +
> +struct virtio_iommu_req_head {
> +	__u8					type;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_tail {
> +	__u8					status;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_attach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_detach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
> +#define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
> +#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
> +#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
> +
> +#define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
> +						 VIRTIO_IOMMU_MAP_F_WRITE |	\
> +						 VIRTIO_IOMMU_MAP_F_EXEC |	\
> +						 VIRTIO_IOMMU_MAP_F_MMIO)
> +
> +struct virtio_iommu_req_map {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__le64					phys_start;
> +	__le32					flags;
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_unmap {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__u8					reserved[4];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#endif
>
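
A short aside on the "interval tree instead of page tables" point from the
commit message: once a mapping is recorded, resolving iova_to_phys() is
plain offset arithmetic over the stored intervals. The sketch below is a
standalone illustration using a flat array instead of the kernel's
interval_tree, and the two mappings are made-up example values rather than
anything taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct viommu_mapping: one record per MAP request. */
struct mapping {
	uint64_t iova_start;
	uint64_t iova_last;	/* inclusive, like interval_tree_node.last */
	uint64_t paddr;
};

/* Linear search standing in for interval_tree_iter_first(). */
static uint64_t iova_to_phys(const struct mapping *m, int n, uint64_t iova)
{
	for (int i = 0; i < n; i++) {
		if (iova >= m[i].iova_start && iova <= m[i].iova_last)
			return m[i].paddr + (iova - m[i].iova_start);
	}
	return 0;	/* no translation, as in viommu_iova_to_phys() */
}

int main(void)
{
	/* Two hypothetical mappings, as recorded by viommu_add_mapping(). */
	struct mapping maps[] = {
		{ 0x10000000, 0x10000fff, 0x80000000 },
		{ 0x10002000, 0x10003fff, 0x90000000 },
	};

	printf("0x10000010 -> %#llx\n",
	       (unsigned long long)iova_to_phys(maps, 2, 0x10000010));
	printf("0x10003000 -> %#llx\n",
	       (unsigned long long)iova_to_phys(maps, 2, 0x10003000));
	return 0;
}

Because the guest keeps these records, UNMAP never has to split a mapping:
viommu_del_mappings() always drops whole nodes, matching the device-side
rule quoted in the code above.
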
Michael S. Tsirkin Nov. 23, 2018, 9:48 p.m. UTC | #2
On Thu, Nov 22, 2018 at 07:37:59PM +0000, Jean-Philippe Brucker wrote:
> The virtio IOMMU is a para-virtualized device that allows the guest to
> send IOMMU requests such as map/unmap over the virtio transport, without
> emulating page tables. This implementation handles ATTACH, DETACH, MAP
> and UNMAP requests.
> 
> The bulk of the code transforms calls coming from the IOMMU API into
> corresponding virtio requests. Mappings are kept in an interval tree
> instead of page tables.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> ---
>  MAINTAINERS                       |   7 +
>  drivers/iommu/Kconfig             |  11 +
>  drivers/iommu/Makefile            |   1 +
>  drivers/iommu/virtio-iommu.c      | 916 ++++++++++++++++++++++++++++++
>  include/uapi/linux/virtio_ids.h   |   1 +
>  include/uapi/linux/virtio_iommu.h | 104 ++++
>  6 files changed, 1040 insertions(+)
>  create mode 100644 drivers/iommu/virtio-iommu.c
>  create mode 100644 include/uapi/linux/virtio_iommu.h
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 1689dcfec800..3d8550c76f4a 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -15946,6 +15946,13 @@ S:	Maintained
>  F:	drivers/virtio/virtio_input.c
>  F:	include/uapi/linux/virtio_input.h
>  
> +VIRTIO IOMMU DRIVER
> +M:	Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> +L:	virtualization@lists.linux-foundation.org
> +S:	Maintained
> +F:	drivers/iommu/virtio-iommu.c
> +F:	include/uapi/linux/virtio_iommu.h
> +
>  VIRTUAL BOX GUEST DEVICE DRIVER
>  M:	Hans de Goede <hdegoede@redhat.com>
>  M:	Arnd Bergmann <arnd@arndb.de>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bf2bbfa2a399..db5f2b8c23f5 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -464,4 +464,15 @@ config QCOM_IOMMU
>  	help
>  	  Support for IOMMU on certain Qualcomm SoCs.
>  
> +config VIRTIO_IOMMU
> +	bool "Virtio IOMMU driver"
> +	depends on VIRTIO=y
> +	select IOMMU_API
> +	select INTERVAL_TREE
> +	select ARM_DMA_USE_IOMMU if ARM
> +	help
> +	  Para-virtualised IOMMU driver with virtio.
> +
> +	  Say Y here if you intend to run this kernel as a guest.
> +
>  endif # IOMMU_SUPPORT
> diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> index 5481e5fe1f95..bd7e55751d09 100644
> --- a/drivers/iommu/Makefile
> +++ b/drivers/iommu/Makefile
> @@ -36,3 +36,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
>  obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
>  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
>  obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
> +obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> new file mode 100644
> index 000000000000..7540dab9c8dc
> --- /dev/null
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -0,0 +1,916 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Virtio driver for the paravirtualized IOMMU
> + *
> + * Copyright (C) 2018 Arm Limited
> + */
> +
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/amba/bus.h>
> +#include <linux/delay.h>
> +#include <linux/dma-iommu.h>
> +#include <linux/freezer.h>
> +#include <linux/interval_tree.h>
> +#include <linux/iommu.h>
> +#include <linux/module.h>
> +#include <linux/of_iommu.h>
> +#include <linux/of_platform.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/wait.h>
> +
> +#include <uapi/linux/virtio_iommu.h>
> +
> +#define MSI_IOVA_BASE			0x8000000
> +#define MSI_IOVA_LENGTH			0x100000
> +
> +#define VIOMMU_REQUEST_VQ		0
> +#define VIOMMU_NR_VQS			1
> +
> +struct viommu_dev {
> +	struct iommu_device		iommu;
> +	struct device			*dev;
> +	struct virtio_device		*vdev;
> +
> +	struct ida			domain_ids;
> +
> +	struct virtqueue		*vqs[VIOMMU_NR_VQS];
> +	spinlock_t			request_lock;
> +	struct list_head		requests;
> +
> +	/* Device configuration */
> +	struct iommu_domain_geometry	geometry;
> +	u64				pgsize_bitmap;
> +	u8				domain_bits;
> +};
> +
> +struct viommu_mapping {
> +	phys_addr_t			paddr;
> +	struct interval_tree_node	iova;
> +	u32				flags;
> +};
> +
> +struct viommu_domain {
> +	struct iommu_domain		domain;
> +	struct viommu_dev		*viommu;
> +	struct mutex			mutex; /* protects viommu pointer */
> +	unsigned int			id;
> +
> +	spinlock_t			mappings_lock;
> +	struct rb_root_cached		mappings;
> +
> +	unsigned long			nr_endpoints;
> +};
> +
> +struct viommu_endpoint {
> +	struct viommu_dev		*viommu;
> +	struct viommu_domain		*vdomain;
> +};
> +
> +struct viommu_request {
> +	struct list_head		list;
> +	void				*writeback;
> +	unsigned int			write_offset;
> +	unsigned int			len;
> +	char				buf[];
> +};
> +
> +#define to_viommu_domain(domain)	\
> +	container_of(domain, struct viommu_domain, domain)
> +
> +static int viommu_get_req_errno(void *buf, size_t len)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	switch (tail->status) {
> +	case VIRTIO_IOMMU_S_OK:
> +		return 0;
> +	case VIRTIO_IOMMU_S_UNSUPP:
> +		return -ENOSYS;
> +	case VIRTIO_IOMMU_S_INVAL:
> +		return -EINVAL;
> +	case VIRTIO_IOMMU_S_RANGE:
> +		return -ERANGE;
> +	case VIRTIO_IOMMU_S_NOENT:
> +		return -ENOENT;
> +	case VIRTIO_IOMMU_S_FAULT:
> +		return -EFAULT;
> +	case VIRTIO_IOMMU_S_IOERR:
> +	case VIRTIO_IOMMU_S_DEVERR:
> +	default:
> +		return -EIO;
> +	}
> +}
> +
> +static void viommu_set_req_status(void *buf, size_t len, int status)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	tail->status = status;
> +}
> +
> +static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
> +					  struct virtio_iommu_req_head *req,
> +					  size_t len)
> +{
> +	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
> +
> +	return len - tail_size;
> +}
> +
> +/*
> + * __viommu_sync_req - Complete all in-flight requests
> + *
> + * Wait for all added requests to complete. When this function returns, all
> + * requests that were in-flight at the time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret = 0;
> +	unsigned int len;
> +	size_t write_len;
> +	struct viommu_request *req;
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	virtqueue_kick(vq);
> +
> +	while (!list_empty(&viommu->requests)) {
> +		len = 0;
> +		req = virtqueue_get_buf(vq, &len);
> +		if (!req)
> +			continue;
> +
> +		if (!len)
> +			viommu_set_req_status(req->buf, req->len,
> +					      VIRTIO_IOMMU_S_IOERR);
> +
> +		write_len = req->len - req->write_offset;
> +		if (req->writeback && len == write_len)
> +			memcpy(req->writeback, req->buf + req->write_offset,
> +			       write_len);
> +
> +		list_del(&req->list);
> +		kfree(req);
> +	}
> +
> +	return ret;
> +}
> +
> +static int viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_sync_req(viommu);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * __viommu_add_req - Add one request to the queue
> + * @buf: pointer to the request buffer
> + * @len: length of the request buffer
> + * @writeback: copy data back to the buffer when the request completes.
> + *
> + * Add a request to the queue. Only synchronize the queue if it's already full.
> + * Otherwise don't kick the queue nor wait for requests to complete.
> + *
> + * When @writeback is true, data written by the device, including the request
> + * status, is copied into @buf after the request completes. This is unsafe if
> + * the caller allocates @buf on the stack and drops the lock between add_req() and
> + * sync_req().
> + *
> + * Return 0 if the request was successfully added to the queue.
> + */
> +static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
> +			    bool writeback)
> +{
> +	int ret;
> +	off_t write_offset;
> +	struct viommu_request *req;
> +	struct scatterlist top_sg, bottom_sg;
> +	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
> +	if (write_offset <= 0)
> +		return -EINVAL;
> +
> +	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	req->len = len;
> +	if (writeback) {
> +		req->writeback = buf + write_offset;
> +		req->write_offset = write_offset;
> +	}
> +	memcpy(&req->buf, buf, write_offset);
> +
> +	sg_init_one(&top_sg, req->buf, write_offset);
> +	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
> +
> +	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		/* If the queue is full, sync and retry */
> +		if (!__viommu_sync_req(viommu))
> +			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	}
> +	if (ret)
> +		goto err_free;
> +
> +	list_add_tail(&req->list, &viommu->requests);
> +	return 0;
> +
> +err_free:
> +	kfree(req);
> +	return ret;
> +}
> +
> +static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_add_req(viommu, buf, len, false);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * Send a request and wait for it to complete. Return the request status (as an
> + * errno)
> + */
> +static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
> +				size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +
> +	ret = __viommu_add_req(viommu, buf, len, true);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
> +		goto out_unlock;
> +	}
> +
> +	ret = __viommu_sync_req(viommu);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +		/* Fall-through (get the actual request status) */
> +	}
> +
> +	ret = viommu_get_req_errno(buf, len);
> +out_unlock:
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +	return ret;
> +}
> +
> +/*
> + * viommu_add_mapping - add a mapping to the internal tree
> + *
> + * On success, return 0. Otherwise return -ENOMEM.
> + */
> +static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
> +			      phys_addr_t paddr, size_t size, u32 flags)
> +{
> +	unsigned long irqflags;
> +	struct viommu_mapping *mapping;
> +
> +	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
> +	if (!mapping)
> +		return -ENOMEM;
> +
> +	mapping->paddr		= paddr;
> +	mapping->iova.start	= iova;
> +	mapping->iova.last	= iova + size - 1;
> +	mapping->flags		= flags;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> +	interval_tree_insert(&mapping->iova, &vdomain->mappings);
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
> +
> +	return 0;
> +}
> +
> +/*
> + * viommu_del_mappings - remove mappings from the internal tree
> + *
> + * @vdomain: the domain
> + * @iova: start of the range
> + * @size: size of the range. A size of 0 corresponds to the entire address
> + *	space.
> + *
> + * On success, returns the number of unmapped bytes (>= size)
> + */
> +static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> +				  unsigned long iova, size_t size)
> +{
> +	size_t unmapped = 0;
> +	unsigned long flags;
> +	unsigned long last = iova + size - 1;
> +	struct viommu_mapping *mapping = NULL;
> +	struct interval_tree_node *node, *next;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> +	while (next) {
> +		node = next;
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		next = interval_tree_iter_next(node, iova, last);
> +
> +		/* Trying to split a mapping? */
> +		if (mapping->iova.start < iova)
> +			break;
> +
> +		/*
> +		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
> +		 * with a single MAP request, so remove the full mapping.
> +		 */
> +		unmapped += mapping->iova.last - mapping->iova.start + 1;
> +
> +		interval_tree_remove(node, &vdomain->mappings);
> +		kfree(mapping);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return unmapped;
> +}
> +
> +/*
> + * viommu_replay_mappings - re-send MAP requests
> + *
> + * When reattaching a domain that was previously detached from all endpoints,
> + * mappings were deleted from the device. Re-create the mappings available in
> + * the internal tree.
> + */
> +static int viommu_replay_mappings(struct viommu_domain *vdomain)
> +{
> +	int ret = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct virtio_iommu_req_map map;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
> +	while (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		map = (struct virtio_iommu_req_map) {
> +			.head.type	= VIRTIO_IOMMU_T_MAP,
> +			.domain		= cpu_to_le32(vdomain->id),
> +			.virt_start	= cpu_to_le64(mapping->iova.start),
> +			.virt_end	= cpu_to_le64(mapping->iova.last),
> +			.phys_start	= cpu_to_le64(mapping->paddr),
> +			.flags		= cpu_to_le32(mapping->flags),
> +		};
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +		if (ret)
> +			break;
> +
> +		node = interval_tree_iter_next(node, 0, -1UL);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return ret;
> +}
> +
> +/* IOMMU API */
> +
> +static struct iommu_domain *viommu_domain_alloc(unsigned type)
> +{
> +	struct viommu_domain *vdomain;
> +
> +	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
> +		return NULL;
> +
> +	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> +	if (!vdomain)
> +		return NULL;
> +
> +	mutex_init(&vdomain->mutex);
> +	spin_lock_init(&vdomain->mappings_lock);
> +	vdomain->mappings = RB_ROOT_CACHED;
> +
> +	if (type == IOMMU_DOMAIN_DMA &&
> +	    iommu_get_dma_cookie(&vdomain->domain)) {
> +		kfree(vdomain);
> +		return NULL;
> +	}
> +
> +	return &vdomain->domain;
> +}
> +
> +static int viommu_domain_finalise(struct viommu_dev *viommu,
> +				  struct iommu_domain *domain)
> +{
> +	int ret;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
> +				  (1U << viommu->domain_bits) - 1;
> +
> +	vdomain->viommu		= viommu;
> +
> +	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
> +	domain->geometry	= viommu->geometry;
> +
> +	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
> +	if (ret >= 0)
> +		vdomain->id = (unsigned int)ret;
> +
> +	return ret > 0 ? 0 : ret;
> +}
> +
> +static void viommu_domain_free(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	iommu_put_dma_cookie(domain);
> +
> +	/* Free all remaining mappings (size 2^64) */
> +	viommu_del_mappings(vdomain, 0, 0);
> +
> +	if (vdomain->viommu)
> +		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> +
> +	kfree(vdomain);
> +}
> +
> +static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> +{
> +	int i;
> +	int ret = 0;
> +	struct virtio_iommu_req_attach req;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +	struct viommu_endpoint *vdev = fwspec->iommu_priv;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	mutex_lock(&vdomain->mutex);
> +	if (!vdomain->viommu) {
> +		/*
> +		 * Properly initialize the domain now that we know which viommu
> +		 * owns it.
> +		 */
> +		ret = viommu_domain_finalise(vdev->viommu, domain);
> +	} else if (vdomain->viommu != vdev->viommu) {
> +		dev_err(dev, "cannot attach to foreign vIOMMU\n");
> +		ret = -EXDEV;
> +	}
> +	mutex_unlock(&vdomain->mutex);
> +
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * In the virtio-iommu device, when attaching the endpoint to a new
> +	 * domain, it is detached from the old one and, if as a result the
> +	 * old domain isn't attached to any endpoint, all mappings are removed
> +	 * from the old domain and it is freed.
> +	 *
> +	 * In the driver the old domain still exists, and its mappings will be
> +	 * recreated if it gets reattached to an endpoint. Otherwise it will be
> +	 * freed explicitly.
> +	 *
> +	 * vdev->vdomain is protected by group->mutex
> +	 */
> +	if (vdev->vdomain)
> +		vdev->vdomain->nr_endpoints--;
> +
> +	req = (struct virtio_iommu_req_attach) {
> +		.head.type	= VIRTIO_IOMMU_T_ATTACH,
> +		.domain		= cpu_to_le32(vdomain->id),
> +	};
> +
> +	for (i = 0; i < fwspec->num_ids; i++) {
> +		req.endpoint = cpu_to_le32(fwspec->ids[i]);
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
> +		if (ret)
> +			return ret;
> +	}
> +
> +	if (!vdomain->nr_endpoints) {
> +		/*
> +		 * This endpoint is the first to be attached to the domain.
> +		 * Replay existing mappings (e.g. SW MSI).
> +		 */
> +		ret = viommu_replay_mappings(vdomain);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	vdomain->nr_endpoints++;
> +	vdev->vdomain = vdomain;
> +
> +	return 0;
> +}
> +
> +static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> +		      phys_addr_t paddr, size_t size, int prot)
> +{
> +	int ret;
> +	int flags;
> +	struct virtio_iommu_req_map map;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
> +		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
> +		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
> +
> +	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> +	if (ret)
> +		return ret;
> +
> +	map = (struct virtio_iommu_req_map) {
> +		.head.type	= VIRTIO_IOMMU_T_MAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.phys_start	= cpu_to_le64(paddr),
> +		.virt_end	= cpu_to_le64(iova + size - 1),
> +		.flags		= cpu_to_le32(flags),
> +	};
> +
> +	if (!vdomain->nr_endpoints)
> +		return 0;
> +
> +	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +	if (ret)
> +		viommu_del_mappings(vdomain, iova, size);
> +
> +	return ret;
> +}
> +
> +static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> +			   size_t size)
> +{
> +	int ret = 0;
> +	size_t unmapped;
> +	struct virtio_iommu_req_unmap unmap;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	unmapped = viommu_del_mappings(vdomain, iova, size);
> +	if (unmapped < size)
> +		return 0;
> +
> +	/* Device already removed all mappings after detach. */
> +	if (!vdomain->nr_endpoints)
> +		return unmapped;
> +
> +	unmap = (struct virtio_iommu_req_unmap) {
> +		.head.type	= VIRTIO_IOMMU_T_UNMAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.virt_end	= cpu_to_le64(iova + unmapped - 1),
> +	};
> +
> +	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
> +	return ret ? 0 : unmapped;
> +}
> +
> +static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
> +				       dma_addr_t iova)
> +{
> +	u64 paddr = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
> +	if (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		paddr = mapping->paddr + (iova - mapping->iova.start);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return paddr;
> +}
> +
> +static void viommu_iotlb_sync(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	viommu_sync_req(vdomain->viommu);
> +}
> +
> +static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *region;
> +	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> +
> +	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
> +					 IOMMU_RESV_SW_MSI);
> +	if (!region)
> +		return;
> +
> +	list_add_tail(&region->list, head);
> +	iommu_dma_get_resv_regions(dev, head);
> +}
> +
> +static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *entry, *next;
> +
> +	list_for_each_entry_safe(entry, next, head, list)
> +		kfree(entry);
> +}
> +
> +static struct iommu_ops viommu_ops;
> +static struct virtio_driver virtio_iommu_drv;
> +
> +static int viommu_match_node(struct device *dev, void *data)
> +{
> +	return dev->parent->fwnode == data;
> +}
> +
> +static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
> +{
> +	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
> +						fwnode, viommu_match_node);
> +	put_device(dev);
> +
> +	return dev ? dev_to_virtio(dev)->priv : NULL;
> +}
> +
> +static int viommu_add_device(struct device *dev)
> +{
> +	int ret;
> +	struct iommu_group *group;
> +	struct viommu_endpoint *vdev;
> +	struct viommu_dev *viommu = NULL;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return -ENODEV;
> +
> +	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
> +	if (!viommu)
> +		return -ENODEV;
> +
> +	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
> +	if (!vdev)
> +		return -ENOMEM;
> +
> +	vdev->viommu = viommu;
> +	fwspec->iommu_priv = vdev;
> +
> +	ret = iommu_device_link(&viommu->iommu, dev);
> +	if (ret)
> +		goto err_free_dev;
> +
> +	/*
> +	 * Last step creates a default domain and attaches to it. Everything
> +	 * must be ready.
> +	 */
> +	group = iommu_group_get_for_dev(dev);
> +	if (IS_ERR(group)) {
> +		ret = PTR_ERR(group);
> +		goto err_unlink_dev;
> +	}
> +
> +	iommu_group_put(group);
> +
> +	return PTR_ERR_OR_ZERO(group);
> +
> +err_unlink_dev:
> +	iommu_device_unlink(&viommu->iommu, dev);
> +err_free_dev:
> +	kfree(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove_device(struct device *dev)
> +{
> +	struct viommu_endpoint *vdev;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return;
> +
> +	vdev = fwspec->iommu_priv;
> +
> +	iommu_group_remove_device(dev);
> +	iommu_device_unlink(&vdev->viommu->iommu, dev);
> +	kfree(vdev);
> +}
> +
> +static struct iommu_group *viommu_device_group(struct device *dev)
> +{
> +	if (dev_is_pci(dev))
> +		return pci_device_group(dev);
> +	else
> +		return generic_device_group(dev);
> +}
> +
> +static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
> +{
> +	return iommu_fwspec_add_ids(dev, args->args, 1);
> +}
> +
> +static struct iommu_ops viommu_ops = {
> +	.domain_alloc		= viommu_domain_alloc,
> +	.domain_free		= viommu_domain_free,
> +	.attach_dev		= viommu_attach_dev,
> +	.map			= viommu_map,
> +	.unmap			= viommu_unmap,
> +	.iova_to_phys		= viommu_iova_to_phys,
> +	.iotlb_sync		= viommu_iotlb_sync,
> +	.add_device		= viommu_add_device,
> +	.remove_device		= viommu_remove_device,
> +	.device_group		= viommu_device_group,
> +	.get_resv_regions	= viommu_get_resv_regions,
> +	.put_resv_regions	= viommu_put_resv_regions,
> +	.of_xlate		= viommu_of_xlate,
> +};
> +
> +static int viommu_init_vqs(struct viommu_dev *viommu)
> +{
> +	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
> +	const char *name = "request";
> +	void *ret;
> +
> +	ret = virtio_find_single_vq(vdev, NULL, name);
> +	if (IS_ERR(ret)) {
> +		dev_err(viommu->dev, "cannot find VQ\n");
> +		return PTR_ERR(ret);
> +	}
> +
> +	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;
> +
> +	return 0;
> +}
> +
> +static int viommu_probe(struct virtio_device *vdev)
> +{
> +	struct device *parent_dev = vdev->dev.parent;
> +	struct viommu_dev *viommu = NULL;
> +	struct device *dev = &vdev->dev;
> +	u64 input_start = 0;
> +	u64 input_end = -1UL;
> +	int ret;
> +
> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> +		return -ENODEV;
> +
> +	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
> +	if (!viommu)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&viommu->request_lock);
> +	ida_init(&viommu->domain_ids);
> +	viommu->dev = dev;
> +	viommu->vdev = vdev;
> +	INIT_LIST_HEAD(&viommu->requests);
> +
> +	ret = viommu_init_vqs(viommu);
> +	if (ret)
> +		return ret;
> +
> +	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
> +		     &viommu->pgsize_bitmap);
> +
> +	if (!viommu->pgsize_bitmap) {
> +		ret = -EINVAL;
> +		goto err_free_vqs;
> +	}
> +
> +	viommu->domain_bits = 32;
> +
> +	/* Optional features */
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.start,
> +			     &input_start);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.end,
> +			     &input_end);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
> +			     struct virtio_iommu_config, domain_bits,
> +			     &viommu->domain_bits);
> +
> +	viommu->geometry = (struct iommu_domain_geometry) {
> +		.aperture_start	= input_start,
> +		.aperture_end	= input_end,
> +		.force_aperture	= true,
> +	};
> +
> +	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
> +
> +	virtio_device_ready(vdev);
> +
> +	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
> +				     virtio_bus_name(vdev));
> +	if (ret)
> +		goto err_free_vqs;
> +
> +	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
> +	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
> +
> +	iommu_device_register(&viommu->iommu);
> +
> +#ifdef CONFIG_PCI
> +	if (pci_bus_type.iommu_ops != &viommu_ops) {
> +		pci_request_acs();
> +		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +#ifdef CONFIG_ARM_AMBA
> +	if (amba_bustype.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +	if (platform_bus_type.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +
> +	vdev->priv = viommu;
> +
> +	dev_info(dev, "input address: %u bits\n",
> +		 order_base_2(viommu->geometry.aperture_end));
> +	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
> +
> +	return 0;
> +
> +err_unregister:
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +err_free_vqs:
> +	vdev->config->del_vqs(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove(struct virtio_device *vdev)
> +{
> +	struct viommu_dev *viommu = vdev->priv;
> +
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +
> +	/* Stop all virtqueues */
> +	vdev->config->reset(vdev);
> +	vdev->config->del_vqs(vdev);
> +
> +	dev_info(&vdev->dev, "device removed\n");
> +}
> +
> +static void viommu_config_changed(struct virtio_device *vdev)
> +{
> +	dev_warn(&vdev->dev, "config changed\n");
> +}
> +
> +static unsigned int features[] = {
> +	VIRTIO_IOMMU_F_MAP_UNMAP,
> +	VIRTIO_IOMMU_F_DOMAIN_BITS,
> +	VIRTIO_IOMMU_F_INPUT_RANGE,
> +};
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static struct virtio_driver virtio_iommu_drv = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.feature_table		= features,
> +	.feature_table_size	= ARRAY_SIZE(features),
> +	.probe			= viommu_probe,
> +	.remove			= viommu_remove,
> +	.config_changed		= viommu_config_changed,
> +};
> +
> +module_virtio_driver(virtio_iommu_drv);
> +
> +MODULE_DESCRIPTION("Virtio IOMMU driver");
> +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 6d5c3b2d4f4d..cfe47c5d9a56 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
>  #define VIRTIO_ID_INPUT        18 /* virtio input */
>  #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
>  #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
> +#define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
>  
>  #endif /* _LINUX_VIRTIO_IDS_H */
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> new file mode 100644
> index 000000000000..e7c05e3afa44
> --- /dev/null
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -0,0 +1,104 @@
> +/* SPDX-License-Identifier: BSD-3-Clause */
> +/*
> + * Virtio-iommu definition v0.9
> + *
> + * Copyright (C) 2018 Arm Ltd.
> + */
> +#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
> +#define _UAPI_LINUX_VIRTIO_IOMMU_H
> +
> +#include <linux/types.h>
> +
> +/* Feature bits */
> +#define VIRTIO_IOMMU_F_INPUT_RANGE		0
> +#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
> +#define VIRTIO_IOMMU_F_MAP_UNMAP		2
> +#define VIRTIO_IOMMU_F_BYPASS			3
> +
> +struct virtio_iommu_range {
> +	__u64					start;
> +	__u64					end;
> +};
> +
> +struct virtio_iommu_config {
> +	/* Supported page sizes */
> +	__u64					page_size_mask;
> +	/* Supported IOVA range */
> +	struct virtio_iommu_range		input_range;
> +	/* Max domain ID size */
> +	__u8					domain_bits;
> +	__u8					padding[3];

There doesn't seem to be enough padding here: the structure is 8-byte
aligned on 64-bit systems, so three bytes of explicit padding still leave
the compiler to add hidden tail padding.
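
For illustration, something like the below would make the layout explicit
(just a sketch, keeping the fields as they are; the real fix may differ):

struct virtio_iommu_config {
	/* Supported page sizes */
	__u64				page_size_mask;
	/* Supported IOVA range */
	struct virtio_iommu_range	input_range;
	/* Max domain ID size */
	__u8				domain_bits;
	/*
	 * Pad the 25 bytes above out to the structure's 8-byte alignment,
	 * so sizeof() is 32 on every ABI instead of depending on implicit
	 * compiler padding.
	 */
	__u8				padding[7];
};

Field offsets in the config space don't change; only the trailing padding
becomes explicit.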

> +};
> +
> +/* Request types */
> +#define VIRTIO_IOMMU_T_ATTACH			0x01
> +#define VIRTIO_IOMMU_T_DETACH			0x02
> +#define VIRTIO_IOMMU_T_MAP			0x03
> +#define VIRTIO_IOMMU_T_UNMAP			0x04
> +
> +/* Status types */
> +#define VIRTIO_IOMMU_S_OK			0x00
> +#define VIRTIO_IOMMU_S_IOERR			0x01
> +#define VIRTIO_IOMMU_S_UNSUPP			0x02
> +#define VIRTIO_IOMMU_S_DEVERR			0x03
> +#define VIRTIO_IOMMU_S_INVAL			0x04
> +#define VIRTIO_IOMMU_S_RANGE			0x05
> +#define VIRTIO_IOMMU_S_NOENT			0x06
> +#define VIRTIO_IOMMU_S_FAULT			0x07
> +
> +struct virtio_iommu_req_head {
> +	__u8					type;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_tail {
> +	__u8					status;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_attach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_detach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
> +#define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
> +#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
> +#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
> +
> +#define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
> +						 VIRTIO_IOMMU_MAP_F_WRITE |	\
> +						 VIRTIO_IOMMU_MAP_F_EXEC |	\
> +						 VIRTIO_IOMMU_MAP_F_MMIO)
> +
> +struct virtio_iommu_req_map {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__le64					phys_start;
> +	__le32					flags;
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_unmap {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__u8					reserved[4];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#endif
> -- 
> 2.19.1
> 
> _______________________________________________
> Virtualization mailing list
> Virtualization@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/virtualization
Michael S. Tsirkin Nov. 23, 2018, 9:56 p.m. UTC | #3
On Thu, Nov 22, 2018 at 07:37:59PM +0000, Jean-Philippe Brucker wrote:
> The virtio IOMMU is a para-virtualized device, allowing to send IOMMU
> requests such as map/unmap over virtio transport without emulating page
> tables. This implementation handles ATTACH, DETACH, MAP and UNMAP
> requests.
> 
> The bulk of the code transforms calls coming from the IOMMU API into
> corresponding virtio requests. Mappings are kept in an interval tree
> instead of page tables.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> ---
>  MAINTAINERS                       |   7 +
>  drivers/iommu/Kconfig             |  11 +
>  drivers/iommu/Makefile            |   1 +
>  drivers/iommu/virtio-iommu.c      | 916 ++++++++++++++++++++++++++++++
>  include/uapi/linux/virtio_ids.h   |   1 +
>  include/uapi/linux/virtio_iommu.h | 104 ++++
>  6 files changed, 1040 insertions(+)
>  create mode 100644 drivers/iommu/virtio-iommu.c
>  create mode 100644 include/uapi/linux/virtio_iommu.h
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 1689dcfec800..3d8550c76f4a 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -15946,6 +15946,13 @@ S:	Maintained
>  F:	drivers/virtio/virtio_input.c
>  F:	include/uapi/linux/virtio_input.h
>  
> +VIRTIO IOMMU DRIVER
> +M:	Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> +L:	virtualization@lists.linux-foundation.org
> +S:	Maintained
> +F:	drivers/iommu/virtio-iommu.c
> +F:	include/uapi/linux/virtio_iommu.h
> +
>  VIRTUAL BOX GUEST DEVICE DRIVER
>  M:	Hans de Goede <hdegoede@redhat.com>
>  M:	Arnd Bergmann <arnd@arndb.de>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bf2bbfa2a399..db5f2b8c23f5 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -464,4 +464,15 @@ config QCOM_IOMMU
>  	help
>  	  Support for IOMMU on certain Qualcomm SoCs.
>  
> +config VIRTIO_IOMMU
> +	bool "Virtio IOMMU driver"
> +	depends on VIRTIO=y
> +	select IOMMU_API
> +	select INTERVAL_TREE
> +	select ARM_DMA_USE_IOMMU if ARM
> +	help
> +	  Para-virtualised IOMMU driver with virtio.
> +
> +	  Say Y here if you intend to run this kernel as a guest.
> +

Given it is Arm-specific right now, shouldn't this depend on ARM?
E.g. there is currently a hack for x86.

>  endif # IOMMU_SUPPORT
> diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> index 5481e5fe1f95..bd7e55751d09 100644
> --- a/drivers/iommu/Makefile
> +++ b/drivers/iommu/Makefile
> @@ -36,3 +36,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
>  obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
>  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
>  obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
> +obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> new file mode 100644
> index 000000000000..7540dab9c8dc
> --- /dev/null
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -0,0 +1,916 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Virtio driver for the paravirtualized IOMMU
> + *
> + * Copyright (C) 2018 Arm Limited
> + */
> +
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/amba/bus.h>
> +#include <linux/delay.h>
> +#include <linux/dma-iommu.h>
> +#include <linux/freezer.h>
> +#include <linux/interval_tree.h>
> +#include <linux/iommu.h>
> +#include <linux/module.h>
> +#include <linux/of_iommu.h>
> +#include <linux/of_platform.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/wait.h>
> +
> +#include <uapi/linux/virtio_iommu.h>
> +
> +#define MSI_IOVA_BASE			0x8000000
> +#define MSI_IOVA_LENGTH			0x100000
> +
> +#define VIOMMU_REQUEST_VQ		0
> +#define VIOMMU_NR_VQS			1
> +
> +struct viommu_dev {
> +	struct iommu_device		iommu;
> +	struct device			*dev;
> +	struct virtio_device		*vdev;
> +
> +	struct ida			domain_ids;
> +
> +	struct virtqueue		*vqs[VIOMMU_NR_VQS];
> +	spinlock_t			request_lock;
> +	struct list_head		requests;
> +
> +	/* Device configuration */
> +	struct iommu_domain_geometry	geometry;
> +	u64				pgsize_bitmap;
> +	u8				domain_bits;
> +};
> +
> +struct viommu_mapping {
> +	phys_addr_t			paddr;
> +	struct interval_tree_node	iova;
> +	u32				flags;
> +};
> +
> +struct viommu_domain {
> +	struct iommu_domain		domain;
> +	struct viommu_dev		*viommu;
> +	struct mutex			mutex; /* protects viommu pointer */
> +	unsigned int			id;
> +
> +	spinlock_t			mappings_lock;
> +	struct rb_root_cached		mappings;
> +
> +	unsigned long			nr_endpoints;
> +};
> +
> +struct viommu_endpoint {
> +	struct viommu_dev		*viommu;
> +	struct viommu_domain		*vdomain;
> +};
> +
> +struct viommu_request {
> +	struct list_head		list;
> +	void				*writeback;
> +	unsigned int			write_offset;
> +	unsigned int			len;
> +	char				buf[];
> +};
> +
> +#define to_viommu_domain(domain)	\
> +	container_of(domain, struct viommu_domain, domain)
> +
> +static int viommu_get_req_errno(void *buf, size_t len)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	switch (tail->status) {
> +	case VIRTIO_IOMMU_S_OK:
> +		return 0;
> +	case VIRTIO_IOMMU_S_UNSUPP:
> +		return -ENOSYS;
> +	case VIRTIO_IOMMU_S_INVAL:
> +		return -EINVAL;
> +	case VIRTIO_IOMMU_S_RANGE:
> +		return -ERANGE;
> +	case VIRTIO_IOMMU_S_NOENT:
> +		return -ENOENT;
> +	case VIRTIO_IOMMU_S_FAULT:
> +		return -EFAULT;
> +	case VIRTIO_IOMMU_S_IOERR:
> +	case VIRTIO_IOMMU_S_DEVERR:
> +	default:
> +		return -EIO;
> +	}
> +}
> +
> +static void viommu_set_req_status(void *buf, size_t len, int status)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	tail->status = status;
> +}
> +
> +static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
> +					  struct virtio_iommu_req_head *req,
> +					  size_t len)
> +{
> +	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
> +
> +	return len - tail_size;
> +}
> +
> +/*
> + * __viommu_sync_req - Complete all in-flight requests
> + *
> + * Wait for all added requests to complete. When this function returns, all
> + * requests that were in-flight at the time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret = 0;
> +	unsigned int len;
> +	size_t write_len;
> +	struct viommu_request *req;
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	virtqueue_kick(vq);
> +
> +	while (!list_empty(&viommu->requests)) {
> +		len = 0;
> +		req = virtqueue_get_buf(vq, &len);
> +		if (!req)
> +			continue;
> +
> +		if (!len)
> +			viommu_set_req_status(req->buf, req->len,
> +					      VIRTIO_IOMMU_S_IOERR);
> +
> +		write_len = req->len - req->write_offset;
> +		if (req->writeback && len == write_len)
> +			memcpy(req->writeback, req->buf + req->write_offset,
> +			       write_len);
> +
> +		list_del(&req->list);
> +		kfree(req);
> +	}
> +
> +	return ret;
> +}
> +
> +static int viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_sync_req(viommu);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * __viommu_add_request - Add one request to the queue
> + * @buf: pointer to the request buffer
> + * @len: length of the request buffer
> + * @writeback: copy data back to the buffer when the request completes.
> + *
> + * Add a request to the queue. Only synchronize the queue if it's already full.
> + * Otherwise don't kick the queue nor wait for requests to complete.
> + *
> + * When @writeback is true, data written by the device, including the request
> + * status, is copied into @buf after the request completes. This is unsafe if
> + * the caller allocates @buf on stack and drops the lock between add_req() and
> + * sync_req().
> + *
> + * Return 0 if the request was successfully added to the queue.
> + */
> +static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
> +			    bool writeback)
> +{
> +	int ret;
> +	off_t write_offset;
> +	struct viommu_request *req;
> +	struct scatterlist top_sg, bottom_sg;
> +	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
> +	if (write_offset <= 0)
> +		return -EINVAL;
> +
> +	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	req->len = len;
> +	if (writeback) {
> +		req->writeback = buf + write_offset;
> +		req->write_offset = write_offset;
> +	}
> +	memcpy(&req->buf, buf, write_offset);
> +
> +	sg_init_one(&top_sg, req->buf, write_offset);
> +	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
> +
> +	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		/* If the queue is full, sync and retry */
> +		if (!__viommu_sync_req(viommu))
> +			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	}
> +	if (ret)
> +		goto err_free;
> +
> +	list_add_tail(&req->list, &viommu->requests);
> +	return 0;
> +
> +err_free:
> +	kfree(req);
> +	return ret;
> +}
> +
> +static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_add_req(viommu, buf, len, false);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * Send a request and wait for it to complete. Return the request status (as an
> + * errno)
> + */
> +static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
> +				size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +
> +	ret = __viommu_add_req(viommu, buf, len, true);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
> +		goto out_unlock;
> +	}
> +
> +	ret = __viommu_sync_req(viommu);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +		/* Fall-through (get the actual request status) */
> +	}
> +
> +	ret = viommu_get_req_errno(buf, len);
> +out_unlock:
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +	return ret;
> +}
> +
> +/*
> + * viommu_add_mapping - add a mapping to the internal tree
> + *
> + * On success, return 0. Otherwise return -ENOMEM.
> + */
> +static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
> +			      phys_addr_t paddr, size_t size, u32 flags)
> +{
> +	unsigned long irqflags;
> +	struct viommu_mapping *mapping;
> +
> +	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
> +	if (!mapping)
> +		return -ENOMEM;
> +
> +	mapping->paddr		= paddr;
> +	mapping->iova.start	= iova;
> +	mapping->iova.last	= iova + size - 1;
> +	mapping->flags		= flags;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> +	interval_tree_insert(&mapping->iova, &vdomain->mappings);
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
> +
> +	return 0;
> +}
> +
> +/*
> + * viommu_del_mappings - remove mappings from the internal tree
> + *
> + * @vdomain: the domain
> + * @iova: start of the range
> + * @size: size of the range. A size of 0 corresponds to the entire address
> + *	space.
> + *
> + * On success, returns the number of unmapped bytes (>= size)
> + */
> +static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> +				  unsigned long iova, size_t size)
> +{
> +	size_t unmapped = 0;
> +	unsigned long flags;
> +	unsigned long last = iova + size - 1;
> +	struct viommu_mapping *mapping = NULL;
> +	struct interval_tree_node *node, *next;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> +	while (next) {
> +		node = next;
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		next = interval_tree_iter_next(node, iova, last);
> +
> +		/* Trying to split a mapping? */
> +		if (mapping->iova.start < iova)
> +			break;
> +
> +		/*
> +		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
> +		 * with a single MAP request, so remove the full mapping.
> +		 */
> +		unmapped += mapping->iova.last - mapping->iova.start + 1;
> +
> +		interval_tree_remove(node, &vdomain->mappings);
> +		kfree(mapping);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return unmapped;
> +}
> +
> +/*
> + * viommu_replay_mappings - re-send MAP requests
> + *
> + * When reattaching a domain that was previously detached from all endpoints,
> + * mappings were deleted from the device. Re-create the mappings available in
> + * the internal tree.
> + */
> +static int viommu_replay_mappings(struct viommu_domain *vdomain)
> +{
> +	int ret = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct virtio_iommu_req_map map;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
> +	while (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		map = (struct virtio_iommu_req_map) {
> +			.head.type	= VIRTIO_IOMMU_T_MAP,
> +			.domain		= cpu_to_le32(vdomain->id),
> +			.virt_start	= cpu_to_le64(mapping->iova.start),
> +			.virt_end	= cpu_to_le64(mapping->iova.last),
> +			.phys_start	= cpu_to_le64(mapping->paddr),
> +			.flags		= cpu_to_le32(mapping->flags),
> +		};
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +		if (ret)
> +			break;
> +
> +		node = interval_tree_iter_next(node, 0, -1UL);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return ret;
> +}
> +
> +/* IOMMU API */
> +
> +static struct iommu_domain *viommu_domain_alloc(unsigned type)
> +{
> +	struct viommu_domain *vdomain;
> +
> +	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
> +		return NULL;
> +
> +	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> +	if (!vdomain)
> +		return NULL;
> +
> +	mutex_init(&vdomain->mutex);
> +	spin_lock_init(&vdomain->mappings_lock);
> +	vdomain->mappings = RB_ROOT_CACHED;
> +
> +	if (type == IOMMU_DOMAIN_DMA &&
> +	    iommu_get_dma_cookie(&vdomain->domain)) {
> +		kfree(vdomain);
> +		return NULL;
> +	}
> +
> +	return &vdomain->domain;
> +}
> +
> +static int viommu_domain_finalise(struct viommu_dev *viommu,
> +				  struct iommu_domain *domain)
> +{
> +	int ret;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
> +				  (1U << viommu->domain_bits) - 1;
> +
> +	vdomain->viommu		= viommu;
> +
> +	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
> +	domain->geometry	= viommu->geometry;
> +
> +	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
> +	if (ret >= 0)
> +		vdomain->id = (unsigned int)ret;
> +
> +	return ret > 0 ? 0 : ret;
> +}
> +
> +static void viommu_domain_free(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	iommu_put_dma_cookie(domain);
> +
> +	/* Free all remaining mappings (size 2^64) */
> +	viommu_del_mappings(vdomain, 0, 0);
> +
> +	if (vdomain->viommu)
> +		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> +
> +	kfree(vdomain);
> +}
> +
> +static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> +{
> +	int i;
> +	int ret = 0;
> +	struct virtio_iommu_req_attach req;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +	struct viommu_endpoint *vdev = fwspec->iommu_priv;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	mutex_lock(&vdomain->mutex);
> +	if (!vdomain->viommu) {
> +		/*
> +		 * Properly initialize the domain now that we know which viommu
> +		 * owns it.
> +		 */
> +		ret = viommu_domain_finalise(vdev->viommu, domain);
> +	} else if (vdomain->viommu != vdev->viommu) {
> +		dev_err(dev, "cannot attach to foreign vIOMMU\n");
> +		ret = -EXDEV;
> +	}
> +	mutex_unlock(&vdomain->mutex);
> +
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * In the virtio-iommu device, when attaching the endpoint to a new
> +	 * domain, it is detached from the old one and, if as a result the
> +	 * old domain isn't attached to any endpoint, all mappings are removed
> +	 * from the old domain and it is freed.
> +	 *
> +	 * In the driver the old domain still exists, and its mappings will be
> +	 * recreated if it gets reattached to an endpoint. Otherwise it will be
> +	 * freed explicitly.
> +	 *
> +	 * vdev->vdomain is protected by group->mutex
> +	 */
> +	if (vdev->vdomain)
> +		vdev->vdomain->nr_endpoints--;
> +
> +	req = (struct virtio_iommu_req_attach) {
> +		.head.type	= VIRTIO_IOMMU_T_ATTACH,
> +		.domain		= cpu_to_le32(vdomain->id),
> +	};
> +
> +	for (i = 0; i < fwspec->num_ids; i++) {
> +		req.endpoint = cpu_to_le32(fwspec->ids[i]);
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
> +		if (ret)
> +			return ret;
> +	}
> +
> +	if (!vdomain->nr_endpoints) {
> +		/*
> +		 * This endpoint is the first to be attached to the domain.
> +		 * Replay existing mappings (e.g. SW MSI).
> +		 */
> +		ret = viommu_replay_mappings(vdomain);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	vdomain->nr_endpoints++;
> +	vdev->vdomain = vdomain;
> +
> +	return 0;
> +}
> +
> +static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> +		      phys_addr_t paddr, size_t size, int prot)
> +{
> +	int ret;
> +	int flags;
> +	struct virtio_iommu_req_map map;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
> +		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
> +		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
> +
> +	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> +	if (ret)
> +		return ret;
> +
> +	map = (struct virtio_iommu_req_map) {
> +		.head.type	= VIRTIO_IOMMU_T_MAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.phys_start	= cpu_to_le64(paddr),
> +		.virt_end	= cpu_to_le64(iova + size - 1),
> +		.flags		= cpu_to_le32(flags),
> +	};
> +
> +	if (!vdomain->nr_endpoints)
> +		return 0;
> +
> +	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +	if (ret)
> +		viommu_del_mappings(vdomain, iova, size);
> +
> +	return ret;
> +}
> +
> +static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> +			   size_t size)
> +{
> +	int ret = 0;
> +	size_t unmapped;
> +	struct virtio_iommu_req_unmap unmap;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	unmapped = viommu_del_mappings(vdomain, iova, size);
> +	if (unmapped < size)
> +		return 0;
> +
> +	/* Device already removed all mappings after detach. */
> +	if (!vdomain->nr_endpoints)
> +		return unmapped;
> +
> +	unmap = (struct virtio_iommu_req_unmap) {
> +		.head.type	= VIRTIO_IOMMU_T_UNMAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.virt_end	= cpu_to_le64(iova + unmapped - 1),
> +	};
> +
> +	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
> +	return ret ? 0 : unmapped;
> +}
> +
> +static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
> +				       dma_addr_t iova)
> +{
> +	u64 paddr = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
> +	if (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		paddr = mapping->paddr + (iova - mapping->iova.start);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return paddr;
> +}
> +
> +static void viommu_iotlb_sync(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	viommu_sync_req(vdomain->viommu);
> +}
> +
> +static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *region;
> +	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> +
> +	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
> +					 IOMMU_RESV_SW_MSI);
> +	if (!region)
> +		return;
> +
> +	list_add_tail(&region->list, head);
> +	iommu_dma_get_resv_regions(dev, head);
> +}
> +
> +static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *entry, *next;
> +
> +	list_for_each_entry_safe(entry, next, head, list)
> +		kfree(entry);
> +}
> +
> +static struct iommu_ops viommu_ops;
> +static struct virtio_driver virtio_iommu_drv;
> +
> +static int viommu_match_node(struct device *dev, void *data)
> +{
> +	return dev->parent->fwnode == data;
> +}
> +
> +static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
> +{
> +	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
> +						fwnode, viommu_match_node);
> +	put_device(dev);
> +
> +	return dev ? dev_to_virtio(dev)->priv : NULL;
> +}
> +
> +static int viommu_add_device(struct device *dev)
> +{
> +	int ret;
> +	struct iommu_group *group;
> +	struct viommu_endpoint *vdev;
> +	struct viommu_dev *viommu = NULL;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return -ENODEV;
> +
> +	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
> +	if (!viommu)
> +		return -ENODEV;
> +
> +	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
> +	if (!vdev)
> +		return -ENOMEM;
> +
> +	vdev->viommu = viommu;
> +	fwspec->iommu_priv = vdev;
> +
> +	ret = iommu_device_link(&viommu->iommu, dev);
> +	if (ret)
> +		goto err_free_dev;
> +
> +	/*
> +	 * Last step creates a default domain and attaches to it. Everything
> +	 * must be ready.
> +	 */
> +	group = iommu_group_get_for_dev(dev);
> +	if (IS_ERR(group)) {
> +		ret = PTR_ERR(group);
> +		goto err_unlink_dev;
> +	}
> +
> +	iommu_group_put(group);
> +
> +	return PTR_ERR_OR_ZERO(group);
> +
> +err_unlink_dev:
> +	iommu_device_unlink(&viommu->iommu, dev);
> +err_free_dev:
> +	kfree(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove_device(struct device *dev)
> +{
> +	struct viommu_endpoint *vdev;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return;
> +
> +	vdev = fwspec->iommu_priv;
> +
> +	iommu_group_remove_device(dev);
> +	iommu_device_unlink(&vdev->viommu->iommu, dev);
> +	kfree(vdev);
> +}
> +
> +static struct iommu_group *viommu_device_group(struct device *dev)
> +{
> +	if (dev_is_pci(dev))
> +		return pci_device_group(dev);
> +	else
> +		return generic_device_group(dev);
> +}
> +
> +static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
> +{
> +	return iommu_fwspec_add_ids(dev, args->args, 1);
> +}
> +
> +static struct iommu_ops viommu_ops = {
> +	.domain_alloc		= viommu_domain_alloc,
> +	.domain_free		= viommu_domain_free,
> +	.attach_dev		= viommu_attach_dev,
> +	.map			= viommu_map,
> +	.unmap			= viommu_unmap,
> +	.iova_to_phys		= viommu_iova_to_phys,
> +	.iotlb_sync		= viommu_iotlb_sync,
> +	.add_device		= viommu_add_device,
> +	.remove_device		= viommu_remove_device,
> +	.device_group		= viommu_device_group,
> +	.get_resv_regions	= viommu_get_resv_regions,
> +	.put_resv_regions	= viommu_put_resv_regions,
> +	.of_xlate		= viommu_of_xlate,
> +};
> +
> +static int viommu_init_vqs(struct viommu_dev *viommu)
> +{
> +	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
> +	const char *name = "request";
> +	void *ret;
> +
> +	ret = virtio_find_single_vq(vdev, NULL, name);
> +	if (IS_ERR(ret)) {
> +		dev_err(viommu->dev, "cannot find VQ\n");
> +		return PTR_ERR(ret);
> +	}
> +
> +	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;
> +
> +	return 0;
> +}
> +
> +static int viommu_probe(struct virtio_device *vdev)
> +{
> +	struct device *parent_dev = vdev->dev.parent;
> +	struct viommu_dev *viommu = NULL;
> +	struct device *dev = &vdev->dev;
> +	u64 input_start = 0;
> +	u64 input_end = -1UL;
> +	int ret;
> +
> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))

Why bother with a feature bit for this then, btw? The driver refuses to
probe without it anyway.

> +		return -ENODEV;
> +
> +	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
> +	if (!viommu)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&viommu->request_lock);
> +	ida_init(&viommu->domain_ids);
> +	viommu->dev = dev;
> +	viommu->vdev = vdev;
> +	INIT_LIST_HEAD(&viommu->requests);
> +
> +	ret = viommu_init_vqs(viommu);
> +	if (ret)
> +		return ret;
> +
> +	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
> +		     &viommu->pgsize_bitmap);
> +
> +	if (!viommu->pgsize_bitmap) {
> +		ret = -EINVAL;
> +		goto err_free_vqs;
> +	}
> +
> +	viommu->domain_bits = 32;
> +
> +	/* Optional features */
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.start,
> +			     &input_start);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.end,
> +			     &input_end);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
> +			     struct virtio_iommu_config, domain_bits,
> +			     &viommu->domain_bits);
> +
> +	viommu->geometry = (struct iommu_domain_geometry) {
> +		.aperture_start	= input_start,
> +		.aperture_end	= input_end,
> +		.force_aperture	= true,
> +	};
> +
> +	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
> +
> +	virtio_device_ready(vdev);
> +
> +	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
> +				     virtio_bus_name(vdev));
> +	if (ret)
> +		goto err_free_vqs;
> +
> +	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
> +	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
> +
> +	iommu_device_register(&viommu->iommu);
> +
> +#ifdef CONFIG_PCI
> +	if (pci_bus_type.iommu_ops != &viommu_ops) {
> +		pci_request_acs();
> +		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +#ifdef CONFIG_ARM_AMBA
> +	if (amba_bustype.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +	if (platform_bus_type.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +
> +	vdev->priv = viommu;
> +
> +	dev_info(dev, "input address: %u bits\n",
> +		 order_base_2(viommu->geometry.aperture_end));
> +	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
> +
> +	return 0;
> +
> +err_unregister:
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +err_free_vqs:
> +	vdev->config->del_vqs(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove(struct virtio_device *vdev)
> +{
> +	struct viommu_dev *viommu = vdev->priv;
> +
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +
> +	/* Stop all virtqueues */
> +	vdev->config->reset(vdev);
> +	vdev->config->del_vqs(vdev);
> +
> +	dev_info(&vdev->dev, "device removed\n");
> +}
> +
> +static void viommu_config_changed(struct virtio_device *vdev)
> +{
> +	dev_warn(&vdev->dev, "config changed\n");
> +}
> +
> +static unsigned int features[] = {
> +	VIRTIO_IOMMU_F_MAP_UNMAP,
> +	VIRTIO_IOMMU_F_DOMAIN_BITS,
> +	VIRTIO_IOMMU_F_INPUT_RANGE,
> +};
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static struct virtio_driver virtio_iommu_drv = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.feature_table		= features,
> +	.feature_table_size	= ARRAY_SIZE(features),
> +	.probe			= viommu_probe,
> +	.remove			= viommu_remove,
> +	.config_changed		= viommu_config_changed,
> +};
> +
> +module_virtio_driver(virtio_iommu_drv);
> +
> +MODULE_DESCRIPTION("Virtio IOMMU driver");
> +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 6d5c3b2d4f4d..cfe47c5d9a56 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
>  #define VIRTIO_ID_INPUT        18 /* virtio input */
>  #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
>  #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
> +#define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
>  
>  #endif /* _LINUX_VIRTIO_IDS_H */
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> new file mode 100644
> index 000000000000..e7c05e3afa44
> --- /dev/null
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -0,0 +1,104 @@
> +/* SPDX-License-Identifier: BSD-3-Clause */
> +/*
> + * Virtio-iommu definition v0.9
> + *
> + * Copyright (C) 2018 Arm Ltd.
> + */
> +#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
> +#define _UAPI_LINUX_VIRTIO_IOMMU_H
> +
> +#include <linux/types.h>
> +
> +/* Feature bits */
> +#define VIRTIO_IOMMU_F_INPUT_RANGE		0
> +#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
> +#define VIRTIO_IOMMU_F_MAP_UNMAP		2
> +#define VIRTIO_IOMMU_F_BYPASS			3
> +
> +struct virtio_iommu_range {
> +	__u64					start;
> +	__u64					end;
> +};
> +
> +struct virtio_iommu_config {
> +	/* Supported page sizes */
> +	__u64					page_size_mask;
> +	/* Supported IOVA range */
> +	struct virtio_iommu_range		input_range;
> +	/* Max domain ID size */
> +	__u8					domain_bits;
> +	__u8					padding[3];
> +};
> +
> +/* Request types */
> +#define VIRTIO_IOMMU_T_ATTACH			0x01
> +#define VIRTIO_IOMMU_T_DETACH			0x02
> +#define VIRTIO_IOMMU_T_MAP			0x03
> +#define VIRTIO_IOMMU_T_UNMAP			0x04
> +
> +/* Status types */
> +#define VIRTIO_IOMMU_S_OK			0x00
> +#define VIRTIO_IOMMU_S_IOERR			0x01
> +#define VIRTIO_IOMMU_S_UNSUPP			0x02
> +#define VIRTIO_IOMMU_S_DEVERR			0x03
> +#define VIRTIO_IOMMU_S_INVAL			0x04
> +#define VIRTIO_IOMMU_S_RANGE			0x05
> +#define VIRTIO_IOMMU_S_NOENT			0x06
> +#define VIRTIO_IOMMU_S_FAULT			0x07
> +
> +struct virtio_iommu_req_head {
> +	__u8					type;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_tail {
> +	__u8					status;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_attach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_detach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
> +#define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
> +#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
> +#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
> +
> +#define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
> +						 VIRTIO_IOMMU_MAP_F_WRITE |	\
> +						 VIRTIO_IOMMU_MAP_F_EXEC |	\
> +						 VIRTIO_IOMMU_MAP_F_MMIO)
> +
> +struct virtio_iommu_req_map {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__le64					phys_start;
> +	__le32					flags;
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_unmap {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__u8					reserved[4];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#endif
> -- 
> 2.19.1
> 
> _______________________________________________
> Virtualization mailing list
> Virtualization@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/virtualization
Michael S. Tsirkin Nov. 23, 2018, 10:02 p.m. UTC | #4
On Thu, Nov 22, 2018 at 07:37:59PM +0000, Jean-Philippe Brucker wrote:
> The virtio IOMMU is a para-virtualized device, allowing to send IOMMU
> requests such as map/unmap over virtio transport without emulating page
> tables. This implementation handles ATTACH, DETACH, MAP and UNMAP
> requests.
> 
> The bulk of the code transforms calls coming from the IOMMU API into
> corresponding virtio requests. Mappings are kept in an interval tree
> instead of page tables.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> ---
>  MAINTAINERS                       |   7 +
>  drivers/iommu/Kconfig             |  11 +
>  drivers/iommu/Makefile            |   1 +
>  drivers/iommu/virtio-iommu.c      | 916 ++++++++++++++++++++++++++++++
>  include/uapi/linux/virtio_ids.h   |   1 +
>  include/uapi/linux/virtio_iommu.h | 104 ++++
>  6 files changed, 1040 insertions(+)
>  create mode 100644 drivers/iommu/virtio-iommu.c
>  create mode 100644 include/uapi/linux/virtio_iommu.h
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 1689dcfec800..3d8550c76f4a 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -15946,6 +15946,13 @@ S:	Maintained
>  F:	drivers/virtio/virtio_input.c
>  F:	include/uapi/linux/virtio_input.h
>  
> +VIRTIO IOMMU DRIVER
> +M:	Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> +L:	virtualization@lists.linux-foundation.org
> +S:	Maintained
> +F:	drivers/iommu/virtio-iommu.c
> +F:	include/uapi/linux/virtio_iommu.h
> +
>  VIRTUAL BOX GUEST DEVICE DRIVER
>  M:	Hans de Goede <hdegoede@redhat.com>
>  M:	Arnd Bergmann <arnd@arndb.de>
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index bf2bbfa2a399..db5f2b8c23f5 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -464,4 +464,15 @@ config QCOM_IOMMU
>  	help
>  	  Support for IOMMU on certain Qualcomm SoCs.
>  
> +config VIRTIO_IOMMU
> +	bool "Virtio IOMMU driver"
> +	depends on VIRTIO=y
> +	select IOMMU_API
> +	select INTERVAL_TREE
> +	select ARM_DMA_USE_IOMMU if ARM
> +	help
> +	  Para-virtualised IOMMU driver with virtio.
> +
> +	  Say Y here if you intend to run this kernel as a guest.
> +
>  endif # IOMMU_SUPPORT
> diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> index 5481e5fe1f95..bd7e55751d09 100644
> --- a/drivers/iommu/Makefile
> +++ b/drivers/iommu/Makefile
> @@ -36,3 +36,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
>  obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
>  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
>  obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
> +obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> new file mode 100644
> index 000000000000..7540dab9c8dc
> --- /dev/null
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -0,0 +1,916 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Virtio driver for the paravirtualized IOMMU
> + *
> + * Copyright (C) 2018 Arm Limited
> + */
> +
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/amba/bus.h>
> +#include <linux/delay.h>
> +#include <linux/dma-iommu.h>
> +#include <linux/freezer.h>
> +#include <linux/interval_tree.h>
> +#include <linux/iommu.h>
> +#include <linux/module.h>
> +#include <linux/of_iommu.h>
> +#include <linux/of_platform.h>
> +#include <linux/pci.h>
> +#include <linux/platform_device.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/wait.h>
> +
> +#include <uapi/linux/virtio_iommu.h>
> +
> +#define MSI_IOVA_BASE			0x8000000
> +#define MSI_IOVA_LENGTH			0x100000
> +
> +#define VIOMMU_REQUEST_VQ		0
> +#define VIOMMU_NR_VQS			1
> +
> +struct viommu_dev {
> +	struct iommu_device		iommu;
> +	struct device			*dev;
> +	struct virtio_device		*vdev;
> +
> +	struct ida			domain_ids;
> +
> +	struct virtqueue		*vqs[VIOMMU_NR_VQS];
> +	spinlock_t			request_lock;
> +	struct list_head		requests;
> +
> +	/* Device configuration */
> +	struct iommu_domain_geometry	geometry;
> +	u64				pgsize_bitmap;
> +	u8				domain_bits;
> +};
> +
> +struct viommu_mapping {
> +	phys_addr_t			paddr;
> +	struct interval_tree_node	iova;
> +	u32				flags;
> +};
> +
> +struct viommu_domain {
> +	struct iommu_domain		domain;
> +	struct viommu_dev		*viommu;
> +	struct mutex			mutex; /* protects viommu pointer */
> +	unsigned int			id;
> +
> +	spinlock_t			mappings_lock;
> +	struct rb_root_cached		mappings;
> +
> +	unsigned long			nr_endpoints;
> +};
> +
> +struct viommu_endpoint {
> +	struct viommu_dev		*viommu;
> +	struct viommu_domain		*vdomain;
> +};
> +
> +struct viommu_request {
> +	struct list_head		list;
> +	void				*writeback;
> +	unsigned int			write_offset;
> +	unsigned int			len;
> +	char				buf[];
> +};
> +
> +#define to_viommu_domain(domain)	\
> +	container_of(domain, struct viommu_domain, domain)
> +
> +static int viommu_get_req_errno(void *buf, size_t len)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	switch (tail->status) {
> +	case VIRTIO_IOMMU_S_OK:
> +		return 0;
> +	case VIRTIO_IOMMU_S_UNSUPP:
> +		return -ENOSYS;
> +	case VIRTIO_IOMMU_S_INVAL:
> +		return -EINVAL;
> +	case VIRTIO_IOMMU_S_RANGE:
> +		return -ERANGE;
> +	case VIRTIO_IOMMU_S_NOENT:
> +		return -ENOENT;
> +	case VIRTIO_IOMMU_S_FAULT:
> +		return -EFAULT;
> +	case VIRTIO_IOMMU_S_IOERR:
> +	case VIRTIO_IOMMU_S_DEVERR:
> +	default:
> +		return -EIO;
> +	}
> +}
> +
> +static void viommu_set_req_status(void *buf, size_t len, int status)
> +{
> +	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
> +
> +	tail->status = status;
> +}
> +
> +static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
> +					  struct virtio_iommu_req_head *req,
> +					  size_t len)
> +{
> +	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
> +
> +	return len - tail_size;
> +}
> +
> +/*
> + * __viommu_sync_req - Complete all in-flight requests
> + *
> + * Wait for all added requests to complete. When this function returns, all
> + * requests that were in-flight at the time of the call have completed.
> + */
> +static int __viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret = 0;
> +	unsigned int len;
> +	size_t write_len;
> +	struct viommu_request *req;
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	virtqueue_kick(vq);
> +
> +	while (!list_empty(&viommu->requests)) {
> +		len = 0;
> +		req = virtqueue_get_buf(vq, &len);
> +		if (!req)
> +			continue;
> +
> +		if (!len)
> +			viommu_set_req_status(req->buf, req->len,
> +					      VIRTIO_IOMMU_S_IOERR);
> +
> +		write_len = req->len - req->write_offset;
> +		if (req->writeback && len == write_len)
> +			memcpy(req->writeback, req->buf + req->write_offset,
> +			       write_len);
> +
> +		list_del(&req->list);
> +		kfree(req);
> +	}

I didn't notice this before, but it seems this will spin with interrupts
disabled until the host handles the request. Please don't do this: the
host side can be another task that needs the same host CPU, so this may
keep interrupts disabled for a very long time.

What to do instead? Queue the request in software and wake the task up
when it completes.

Since a kick is a VM exit, kicking with interrupts disabled is discouraged
too: better to prepare the request, enable interrupts, then kick.
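
Something along these lines, roughly. Just a sketch, only showing the
synchronous path: the completion field, viommu_request_done() and the
changed __viommu_add_req() return value are made up here, not in the
patch, and it needs <linux/completion.h> on top of the existing includes:

struct viommu_request {
	struct list_head	list;
	void			*writeback;
	unsigned int		write_offset;
	unsigned int		len;
	struct completion	done;	/* completed from the vq callback */
	char			buf[];
};

/*
 * Request virtqueue callback, registered through virtio_find_single_vq()
 * instead of passing NULL: copy results back and wake the submitter, so
 * nobody polls virtqueue_get_buf() with interrupts disabled.
 */
static void viommu_request_done(struct virtqueue *vq)
{
	unsigned int len;
	unsigned long flags;
	struct viommu_request *req;
	struct viommu_dev *viommu = vq->vdev->priv;

	spin_lock_irqsave(&viommu->request_lock, flags);
	while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
		size_t write_len = req->len - req->write_offset;

		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);
		list_del(&req->list);
		complete(&req->done);
	}
	spin_unlock_irqrestore(&viommu->request_lock, flags);
}

/*
 * Synchronous submission: enqueue under the lock, then kick and sleep with
 * interrupts enabled instead of spinning on virtqueue_get_buf(). Assumes
 * __viommu_add_req() now returns the request it allocated (or an ERR_PTR),
 * with req->done initialised before the buffer is added to the ring.
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;
	struct viommu_request *req;

	spin_lock_irqsave(&viommu->request_lock, flags);
	req = __viommu_add_req(viommu, buf, len, true);
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* The kick (a VM exit) now happens with interrupts enabled */
	virtqueue_kick(viommu->vqs[VIOMMU_REQUEST_VQ]);

	/* Sleep until viommu_request_done() signals completion */
	wait_for_completion(&req->done);

	ret = viommu_get_req_errno(buf, len);
	kfree(req);
	return ret;
}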

> +
> +	return ret;
> +}
> +
> +static int viommu_sync_req(struct viommu_dev *viommu)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_sync_req(viommu);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * __viommu_add_request - Add one request to the queue
> + * @buf: pointer to the request buffer
> + * @len: length of the request buffer
> + * @writeback: copy data back to the buffer when the request completes.
> + *
> + * Add a request to the queue. Only synchronize the queue if it's already full.
> + * Otherwise don't kick the queue nor wait for requests to complete.
> + *
> + * When @writeback is true, data written by the device, including the request
> + * status, is copied into @buf after the request completes. This is unsafe if
> + * the caller allocates @buf on stack and drops the lock between add_req() and
> + * sync_req().
> + *
> + * Return 0 if the request was successfully added to the queue.
> + */
> +static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
> +			    bool writeback)
> +{
> +	int ret;
> +	off_t write_offset;
> +	struct viommu_request *req;
> +	struct scatterlist top_sg, bottom_sg;
> +	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> +
> +	assert_spin_locked(&viommu->request_lock);
> +
> +	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
> +	if (write_offset <= 0)
> +		return -EINVAL;
> +
> +	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	req->len = len;
> +	if (writeback) {
> +		req->writeback = buf + write_offset;
> +		req->write_offset = write_offset;
> +	}
> +	memcpy(&req->buf, buf, write_offset);
> +
> +	sg_init_one(&top_sg, req->buf, write_offset);
> +	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
> +
> +	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	if (ret == -ENOSPC) {
> +		/* If the queue is full, sync and retry */
> +		if (!__viommu_sync_req(viommu))
> +			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
> +	}
> +	if (ret)
> +		goto err_free;
> +
> +	list_add_tail(&req->list, &viommu->requests);
> +	return 0;
> +
> +err_free:
> +	kfree(req);
> +	return ret;
> +}
> +
> +static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +	ret = __viommu_add_req(viommu, buf, len, false);
> +	if (ret)
> +		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * Send a request and wait for it to complete. Return the request status (as an
> + * errno)
> + */
> +static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
> +				size_t len)
> +{
> +	int ret;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&viommu->request_lock, flags);
> +
> +	ret = __viommu_add_req(viommu, buf, len, true);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
> +		goto out_unlock;
> +	}
> +
> +	ret = __viommu_sync_req(viommu);
> +	if (ret) {
> +		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
> +		/* Fall-through (get the actual request status) */
> +	}
> +
> +	ret = viommu_get_req_errno(buf, len);
> +out_unlock:
> +	spin_unlock_irqrestore(&viommu->request_lock, flags);
> +	return ret;
> +}
> +
> +/*
> + * viommu_add_mapping - add a mapping to the internal tree
> + *
> + * On success, return 0. Otherwise return -ENOMEM.
> + */
> +static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
> +			      phys_addr_t paddr, size_t size, u32 flags)
> +{
> +	unsigned long irqflags;
> +	struct viommu_mapping *mapping;
> +
> +	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
> +	if (!mapping)
> +		return -ENOMEM;
> +
> +	mapping->paddr		= paddr;
> +	mapping->iova.start	= iova;
> +	mapping->iova.last	= iova + size - 1;
> +	mapping->flags		= flags;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
> +	interval_tree_insert(&mapping->iova, &vdomain->mappings);
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
> +
> +	return 0;
> +}
> +
> +/*
> + * viommu_del_mappings - remove mappings from the internal tree
> + *
> + * @vdomain: the domain
> + * @iova: start of the range
> + * @size: size of the range. A size of 0 corresponds to the entire address
> + *	space.
> + *
> + * On success, returns the number of unmapped bytes (>= size)
> + */
> +static size_t viommu_del_mappings(struct viommu_domain *vdomain,
> +				  unsigned long iova, size_t size)
> +{
> +	size_t unmapped = 0;
> +	unsigned long flags;
> +	unsigned long last = iova + size - 1;
> +	struct viommu_mapping *mapping = NULL;
> +	struct interval_tree_node *node, *next;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
> +	while (next) {
> +		node = next;
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		next = interval_tree_iter_next(node, iova, last);
> +
> +		/* Trying to split a mapping? */
> +		if (mapping->iova.start < iova)
> +			break;
> +
> +		/*
> +		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
> +		 * with a single MAP request, so remove the full mapping.
> +		 */
> +		unmapped += mapping->iova.last - mapping->iova.start + 1;
> +
> +		interval_tree_remove(node, &vdomain->mappings);
> +		kfree(mapping);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return unmapped;
> +}
> +
> +/*
> + * viommu_replay_mappings - re-send MAP requests
> + *
> + * When reattaching a domain that was previously detached from all endpoints,
> + * mappings were deleted from the device. Re-create the mappings available in
> + * the internal tree.
> + */
> +static int viommu_replay_mappings(struct viommu_domain *vdomain)
> +{
> +	int ret = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct virtio_iommu_req_map map;
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
> +	while (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		map = (struct virtio_iommu_req_map) {
> +			.head.type	= VIRTIO_IOMMU_T_MAP,
> +			.domain		= cpu_to_le32(vdomain->id),
> +			.virt_start	= cpu_to_le64(mapping->iova.start),
> +			.virt_end	= cpu_to_le64(mapping->iova.last),
> +			.phys_start	= cpu_to_le64(mapping->paddr),
> +			.flags		= cpu_to_le32(mapping->flags),
> +		};
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +		if (ret)
> +			break;
> +
> +		node = interval_tree_iter_next(node, 0, -1UL);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return ret;
> +}
> +
> +/* IOMMU API */
> +
> +static struct iommu_domain *viommu_domain_alloc(unsigned type)
> +{
> +	struct viommu_domain *vdomain;
> +
> +	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
> +		return NULL;
> +
> +	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> +	if (!vdomain)
> +		return NULL;
> +
> +	mutex_init(&vdomain->mutex);
> +	spin_lock_init(&vdomain->mappings_lock);
> +	vdomain->mappings = RB_ROOT_CACHED;
> +
> +	if (type == IOMMU_DOMAIN_DMA &&
> +	    iommu_get_dma_cookie(&vdomain->domain)) {
> +		kfree(vdomain);
> +		return NULL;
> +	}
> +
> +	return &vdomain->domain;
> +}
> +
> +static int viommu_domain_finalise(struct viommu_dev *viommu,
> +				  struct iommu_domain *domain)
> +{
> +	int ret;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
> +				  (1U << viommu->domain_bits) - 1;
> +
> +	vdomain->viommu		= viommu;
> +
> +	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
> +	domain->geometry	= viommu->geometry;
> +
> +	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
> +	if (ret >= 0)
> +		vdomain->id = (unsigned int)ret;
> +
> +	return ret > 0 ? 0 : ret;
> +}
> +
> +static void viommu_domain_free(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	iommu_put_dma_cookie(domain);
> +
> +	/* Free all remaining mappings (size 2^64) */
> +	viommu_del_mappings(vdomain, 0, 0);
> +
> +	if (vdomain->viommu)
> +		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
> +
> +	kfree(vdomain);
> +}
> +
> +static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> +{
> +	int i;
> +	int ret = 0;
> +	struct virtio_iommu_req_attach req;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +	struct viommu_endpoint *vdev = fwspec->iommu_priv;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	mutex_lock(&vdomain->mutex);
> +	if (!vdomain->viommu) {
> +		/*
> +		 * Properly initialize the domain now that we know which viommu
> +		 * owns it.
> +		 */
> +		ret = viommu_domain_finalise(vdev->viommu, domain);
> +	} else if (vdomain->viommu != vdev->viommu) {
> +		dev_err(dev, "cannot attach to foreign vIOMMU\n");
> +		ret = -EXDEV;
> +	}
> +	mutex_unlock(&vdomain->mutex);
> +
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * In the virtio-iommu device, when attaching the endpoint to a new
> +	 * domain, it is detached from the old one and, if as a result the
> +	 * old domain isn't attached to any endpoint, all mappings are removed
> +	 * from the old domain and it is freed.
> +	 *
> +	 * In the driver the old domain still exists, and its mappings will be
> +	 * recreated if it gets reattached to an endpoint. Otherwise it will be
> +	 * freed explicitly.
> +	 *
> +	 * vdev->vdomain is protected by group->mutex
> +	 */
> +	if (vdev->vdomain)
> +		vdev->vdomain->nr_endpoints--;
> +
> +	req = (struct virtio_iommu_req_attach) {
> +		.head.type	= VIRTIO_IOMMU_T_ATTACH,
> +		.domain		= cpu_to_le32(vdomain->id),
> +	};
> +
> +	for (i = 0; i < fwspec->num_ids; i++) {
> +		req.endpoint = cpu_to_le32(fwspec->ids[i]);
> +
> +		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
> +		if (ret)
> +			return ret;
> +	}
> +
> +	if (!vdomain->nr_endpoints) {
> +		/*
> +		 * This endpoint is the first to be attached to the domain.
> +		 * Replay existing mappings (e.g. SW MSI).
> +		 */
> +		ret = viommu_replay_mappings(vdomain);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	vdomain->nr_endpoints++;
> +	vdev->vdomain = vdomain;
> +
> +	return 0;
> +}
> +
> +static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> +		      phys_addr_t paddr, size_t size, int prot)
> +{
> +	int ret;
> +	int flags;
> +	struct virtio_iommu_req_map map;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
> +		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
> +		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
> +
> +	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
> +	if (ret)
> +		return ret;
> +
> +	map = (struct virtio_iommu_req_map) {
> +		.head.type	= VIRTIO_IOMMU_T_MAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.phys_start	= cpu_to_le64(paddr),
> +		.virt_end	= cpu_to_le64(iova + size - 1),
> +		.flags		= cpu_to_le32(flags),
> +	};
> +
> +	if (!vdomain->nr_endpoints)
> +		return 0;
> +
> +	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> +	if (ret)
> +		viommu_del_mappings(vdomain, iova, size);
> +
> +	return ret;
> +}
> +
> +static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> +			   size_t size)
> +{
> +	int ret = 0;
> +	size_t unmapped;
> +	struct virtio_iommu_req_unmap unmap;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	unmapped = viommu_del_mappings(vdomain, iova, size);
> +	if (unmapped < size)
> +		return 0;
> +
> +	/* Device already removed all mappings after detach. */
> +	if (!vdomain->nr_endpoints)
> +		return unmapped;
> +
> +	unmap = (struct virtio_iommu_req_unmap) {
> +		.head.type	= VIRTIO_IOMMU_T_UNMAP,
> +		.domain		= cpu_to_le32(vdomain->id),
> +		.virt_start	= cpu_to_le64(iova),
> +		.virt_end	= cpu_to_le64(iova + unmapped - 1),
> +	};
> +
> +	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
> +	return ret ? 0 : unmapped;
> +}
> +
> +static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
> +				       dma_addr_t iova)
> +{
> +	u64 paddr = 0;
> +	unsigned long flags;
> +	struct viommu_mapping *mapping;
> +	struct interval_tree_node *node;
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	spin_lock_irqsave(&vdomain->mappings_lock, flags);
> +	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
> +	if (node) {
> +		mapping = container_of(node, struct viommu_mapping, iova);
> +		paddr = mapping->paddr + (iova - mapping->iova.start);
> +	}
> +	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
> +
> +	return paddr;
> +}
> +
> +static void viommu_iotlb_sync(struct iommu_domain *domain)
> +{
> +	struct viommu_domain *vdomain = to_viommu_domain(domain);
> +
> +	viommu_sync_req(vdomain->viommu);
> +}
> +
> +static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *region;
> +	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> +
> +	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
> +					 IOMMU_RESV_SW_MSI);
> +	if (!region)
> +		return;
> +
> +	list_add_tail(&region->list, head);
> +	iommu_dma_get_resv_regions(dev, head);
> +}
> +
> +static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
> +{
> +	struct iommu_resv_region *entry, *next;
> +
> +	list_for_each_entry_safe(entry, next, head, list)
> +		kfree(entry);
> +}
> +
> +static struct iommu_ops viommu_ops;
> +static struct virtio_driver virtio_iommu_drv;
> +
> +static int viommu_match_node(struct device *dev, void *data)
> +{
> +	return dev->parent->fwnode == data;
> +}
> +
> +static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
> +{
> +	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
> +						fwnode, viommu_match_node);
> +	put_device(dev);
> +
> +	return dev ? dev_to_virtio(dev)->priv : NULL;
> +}
> +
> +static int viommu_add_device(struct device *dev)
> +{
> +	int ret;
> +	struct iommu_group *group;
> +	struct viommu_endpoint *vdev;
> +	struct viommu_dev *viommu = NULL;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return -ENODEV;
> +
> +	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
> +	if (!viommu)
> +		return -ENODEV;
> +
> +	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
> +	if (!vdev)
> +		return -ENOMEM;
> +
> +	vdev->viommu = viommu;
> +	fwspec->iommu_priv = vdev;
> +
> +	ret = iommu_device_link(&viommu->iommu, dev);
> +	if (ret)
> +		goto err_free_dev;
> +
> +	/*
> +	 * Last step creates a default domain and attaches to it. Everything
> +	 * must be ready.
> +	 */
> +	group = iommu_group_get_for_dev(dev);
> +	if (IS_ERR(group)) {
> +		ret = PTR_ERR(group);
> +		goto err_unlink_dev;
> +	}
> +
> +	iommu_group_put(group);
> +
> +	return PTR_ERR_OR_ZERO(group);
> +
> +err_unlink_dev:
> +	iommu_device_unlink(&viommu->iommu, dev);
> +err_free_dev:
> +	kfree(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove_device(struct device *dev)
> +{
> +	struct viommu_endpoint *vdev;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +
> +	if (!fwspec || fwspec->ops != &viommu_ops)
> +		return;
> +
> +	vdev = fwspec->iommu_priv;
> +
> +	iommu_group_remove_device(dev);
> +	iommu_device_unlink(&vdev->viommu->iommu, dev);
> +	kfree(vdev);
> +}
> +
> +static struct iommu_group *viommu_device_group(struct device *dev)
> +{
> +	if (dev_is_pci(dev))
> +		return pci_device_group(dev);
> +	else
> +		return generic_device_group(dev);
> +}
> +
> +static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
> +{
> +	return iommu_fwspec_add_ids(dev, args->args, 1);
> +}
> +
> +static struct iommu_ops viommu_ops = {
> +	.domain_alloc		= viommu_domain_alloc,
> +	.domain_free		= viommu_domain_free,
> +	.attach_dev		= viommu_attach_dev,
> +	.map			= viommu_map,
> +	.unmap			= viommu_unmap,
> +	.iova_to_phys		= viommu_iova_to_phys,
> +	.iotlb_sync		= viommu_iotlb_sync,
> +	.add_device		= viommu_add_device,
> +	.remove_device		= viommu_remove_device,
> +	.device_group		= viommu_device_group,
> +	.get_resv_regions	= viommu_get_resv_regions,
> +	.put_resv_regions	= viommu_put_resv_regions,
> +	.of_xlate		= viommu_of_xlate,
> +};
> +
> +static int viommu_init_vqs(struct viommu_dev *viommu)
> +{
> +	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
> +	const char *name = "request";
> +	void *ret;
> +
> +	ret = virtio_find_single_vq(vdev, NULL, name);
> +	if (IS_ERR(ret)) {
> +		dev_err(viommu->dev, "cannot find VQ\n");
> +		return PTR_ERR(ret);
> +	}
> +
> +	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;
> +
> +	return 0;
> +}
> +
> +static int viommu_probe(struct virtio_device *vdev)
> +{
> +	struct device *parent_dev = vdev->dev.parent;
> +	struct viommu_dev *viommu = NULL;
> +	struct device *dev = &vdev->dev;
> +	u64 input_start = 0;
> +	u64 input_end = -1UL;
> +	int ret;
> +
> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> +		return -ENODEV;
> +
> +	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
> +	if (!viommu)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&viommu->request_lock);
> +	ida_init(&viommu->domain_ids);
> +	viommu->dev = dev;
> +	viommu->vdev = vdev;
> +	INIT_LIST_HEAD(&viommu->requests);
> +
> +	ret = viommu_init_vqs(viommu);
> +	if (ret)
> +		return ret;
> +
> +	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
> +		     &viommu->pgsize_bitmap);
> +
> +	if (!viommu->pgsize_bitmap) {
> +		ret = -EINVAL;
> +		goto err_free_vqs;
> +	}
> +
> +	viommu->domain_bits = 32;
> +
> +	/* Optional features */
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.start,
> +			     &input_start);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
> +			     struct virtio_iommu_config, input_range.end,
> +			     &input_end);
> +
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
> +			     struct virtio_iommu_config, domain_bits,
> +			     &viommu->domain_bits);
> +
> +	viommu->geometry = (struct iommu_domain_geometry) {
> +		.aperture_start	= input_start,
> +		.aperture_end	= input_end,
> +		.force_aperture	= true,
> +	};
> +
> +	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
> +
> +	virtio_device_ready(vdev);
> +
> +	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
> +				     virtio_bus_name(vdev));
> +	if (ret)
> +		goto err_free_vqs;
> +
> +	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
> +	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
> +
> +	iommu_device_register(&viommu->iommu);
> +
> +#ifdef CONFIG_PCI
> +	if (pci_bus_type.iommu_ops != &viommu_ops) {
> +		pci_request_acs();
> +		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +#ifdef CONFIG_ARM_AMBA
> +	if (amba_bustype.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +#endif
> +	if (platform_bus_type.iommu_ops != &viommu_ops) {
> +		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
> +		if (ret)
> +			goto err_unregister;
> +	}
> +
> +	vdev->priv = viommu;
> +
> +	dev_info(dev, "input address: %u bits\n",
> +		 order_base_2(viommu->geometry.aperture_end));
> +	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
> +
> +	return 0;
> +
> +err_unregister:
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +err_free_vqs:
> +	vdev->config->del_vqs(vdev);
> +
> +	return ret;
> +}
> +
> +static void viommu_remove(struct virtio_device *vdev)
> +{
> +	struct viommu_dev *viommu = vdev->priv;
> +
> +	iommu_device_sysfs_remove(&viommu->iommu);
> +	iommu_device_unregister(&viommu->iommu);
> +
> +	/* Stop all virtqueues */
> +	vdev->config->reset(vdev);
> +	vdev->config->del_vqs(vdev);
> +
> +	dev_info(&vdev->dev, "device removed\n");
> +}
> +
> +static void viommu_config_changed(struct virtio_device *vdev)
> +{
> +	dev_warn(&vdev->dev, "config changed\n");
> +}
> +
> +static unsigned int features[] = {
> +	VIRTIO_IOMMU_F_MAP_UNMAP,
> +	VIRTIO_IOMMU_F_DOMAIN_BITS,
> +	VIRTIO_IOMMU_F_INPUT_RANGE,
> +};
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> +static struct virtio_driver virtio_iommu_drv = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.feature_table		= features,
> +	.feature_table_size	= ARRAY_SIZE(features),
> +	.probe			= viommu_probe,
> +	.remove			= viommu_remove,
> +	.config_changed		= viommu_config_changed,
> +};
> +
> +module_virtio_driver(virtio_iommu_drv);
> +
> +MODULE_DESCRIPTION("Virtio IOMMU driver");
> +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
> +MODULE_LICENSE("GPL v2");
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 6d5c3b2d4f4d..cfe47c5d9a56 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
>  #define VIRTIO_ID_INPUT        18 /* virtio input */
>  #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
>  #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
> +#define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
>  
>  #endif /* _LINUX_VIRTIO_IDS_H */
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> new file mode 100644
> index 000000000000..e7c05e3afa44
> --- /dev/null
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -0,0 +1,104 @@
> +/* SPDX-License-Identifier: BSD-3-Clause */
> +/*
> + * Virtio-iommu definition v0.9
> + *
> + * Copyright (C) 2018 Arm Ltd.
> + */
> +#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
> +#define _UAPI_LINUX_VIRTIO_IOMMU_H
> +
> +#include <linux/types.h>
> +
> +/* Feature bits */
> +#define VIRTIO_IOMMU_F_INPUT_RANGE		0
> +#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
> +#define VIRTIO_IOMMU_F_MAP_UNMAP		2
> +#define VIRTIO_IOMMU_F_BYPASS			3
> +
> +struct virtio_iommu_range {
> +	__u64					start;
> +	__u64					end;
> +};
> +
> +struct virtio_iommu_config {
> +	/* Supported page sizes */
> +	__u64					page_size_mask;
> +	/* Supported IOVA range */
> +	struct virtio_iommu_range		input_range;
> +	/* Max domain ID size */
> +	__u8					domain_bits;
> +	__u8					padding[3];
> +};
> +
> +/* Request types */
> +#define VIRTIO_IOMMU_T_ATTACH			0x01
> +#define VIRTIO_IOMMU_T_DETACH			0x02
> +#define VIRTIO_IOMMU_T_MAP			0x03
> +#define VIRTIO_IOMMU_T_UNMAP			0x04
> +
> +/* Status types */
> +#define VIRTIO_IOMMU_S_OK			0x00
> +#define VIRTIO_IOMMU_S_IOERR			0x01
> +#define VIRTIO_IOMMU_S_UNSUPP			0x02
> +#define VIRTIO_IOMMU_S_DEVERR			0x03
> +#define VIRTIO_IOMMU_S_INVAL			0x04
> +#define VIRTIO_IOMMU_S_RANGE			0x05
> +#define VIRTIO_IOMMU_S_NOENT			0x06
> +#define VIRTIO_IOMMU_S_FAULT			0x07
> +
> +struct virtio_iommu_req_head {
> +	__u8					type;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_tail {
> +	__u8					status;
> +	__u8					reserved[3];
> +};
> +
> +struct virtio_iommu_req_attach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_detach {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le32					endpoint;
> +	__u8					reserved[8];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
> +#define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
> +#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
> +#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
> +
> +#define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
> +						 VIRTIO_IOMMU_MAP_F_WRITE |	\
> +						 VIRTIO_IOMMU_MAP_F_EXEC |	\
> +						 VIRTIO_IOMMU_MAP_F_MMIO)
> +
> +struct virtio_iommu_req_map {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__le64					phys_start;
> +	__le32					flags;
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +struct virtio_iommu_req_unmap {
> +	struct virtio_iommu_req_head		head;
> +	__le32					domain;
> +	__le64					virt_start;
> +	__le64					virt_end;
> +	__u8					reserved[4];
> +	struct virtio_iommu_req_tail		tail;
> +};
> +
> +#endif
> -- 
> 2.19.1
> 
Jean-Philippe Brucker Nov. 27, 2018, 5:50 p.m. UTC | #5
On 23/11/2018 22:02, Michael S. Tsirkin wrote:
>> +/*
>> + * __viommu_sync_req - Complete all in-flight requests
>> + *
>> + * Wait for all added requests to complete. When this function returns, all
>> + * requests that were in-flight at the time of the call have completed.
>> + */
>> +static int __viommu_sync_req(struct viommu_dev *viommu)
>> +{
>> +	int ret = 0;
>> +	unsigned int len;
>> +	size_t write_len;
>> +	struct viommu_request *req;
>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>> +
>> +	assert_spin_locked(&viommu->request_lock);
>> +
>> +	virtqueue_kick(vq);
>> +
>> +	while (!list_empty(&viommu->requests)) {
>> +		len = 0;
>> +		req = virtqueue_get_buf(vq, &len);
>> +		if (!req)
>> +			continue;
>> +
>> +		if (!len)
>> +			viommu_set_req_status(req->buf, req->len,
>> +					      VIRTIO_IOMMU_S_IOERR);
>> +
>> +		write_len = req->len - req->write_offset;
>> +		if (req->writeback && len == write_len)
>> +			memcpy(req->writeback, req->buf + req->write_offset,
>> +			       write_len);
>> +
>> +		list_del(&req->list);
>> +		kfree(req);
>> +	}
> 
> I didn't notice this in the past but it seems this will spin
> with interrupts disabled until host handles the request.
> Please do not do this - host execution can be another
> task that needs the same host CPU. This will then disable
> interrupts for a very very long time.

In the guest yes, but that doesn't prevent the host from running another
task right? My tests run fine when QEMU is bound to a single CPU, even
though vcpu and viommu run in different threads

> What to do then? Queue in software and wake up task.

Unfortunately I can't do anything here, because IOMMU drivers can't
sleep in the iommu_map() or iommu_unmap() path. The problem is the same
for all IOMMU drivers. That's because the DMA API allows drivers to call
some functions with interrupts disabled. For example
Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
dma_unmap_single() to be called in interrupt context.
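
To illustrate (a minimal sketch, not part of this series): a device driver is
allowed to tear down DMA mappings from its interrupt handler, and that path
ends up in the IOMMU driver with interrupts disabled. Names here are made up:

	#include <linux/dma-mapping.h>
	#include <linux/interrupt.h>

	/* Hypothetical driver state, for illustration only */
	struct foo_dev {
		struct device	*dev;
		dma_addr_t	rx_dma;
		size_t		rx_len;
	};

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		struct foo_dev *foo = data;

		/*
		 * Interrupts are disabled here. With an IOMMU-backed DMA
		 * domain this may call into iommu_unmap() and therefore
		 * viommu_unmap(), which consequently cannot sleep.
		 */
		dma_unmap_single(foo->dev, foo->rx_dma, foo->rx_len,
				 DMA_FROM_DEVICE);

		return IRQ_HANDLED;
	}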

> As kick is a vm exit, kicking with interrupts disabled is discouraged too:
> better to prepare for the kick, enable interrupts, then kick.

That was on my list of things to look at, because it could relax
things for device drivers that don't call us with interrupts disabled. I
just tried it and I can see some performance improvement (7% and 4% on
tcp_stream and tcp_maerts respectively, +/-2.5%).

Since it's an optimization I'll leave it for later (ACPI and module
support is higher on my list). The resulting change is complicated
because we now need to deal with threads adding new requests while
sync() is running. With my current prototype one thread could end up
staying in sync() while other threads add new async requests, so I need
to find a way to bound it.
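
For reference, a rough sketch of the direction I'm experimenting with, using
the driver's existing viommu_dev state (this is illustrative, not the final
change): publish the descriptors under the lock, but issue the notification,
i.e. the VM exit, after releasing it:

	static void viommu_kick_example(struct viommu_dev *viommu)
	{
		bool notify;
		unsigned long flags;
		struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

		spin_lock_irqsave(&viommu->request_lock, flags);
		/* Make the added descriptors visible to the device */
		notify = virtqueue_kick_prepare(vq);
		spin_unlock_irqrestore(&viommu->request_lock, flags);

		/* Do the expensive exit without holding the lock */
		if (notify)
			virtqueue_notify(vq);
	}

The difficult part is then reaping completions: other threads may keep adding
requests while one thread waits, which is the bounding problem mentioned
above.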

Thanks,
Jean
Jean-Philippe Brucker Nov. 27, 2018, 5:55 p.m. UTC | #6
On 23/11/2018 21:56, Michael S. Tsirkin wrote:
>> +config VIRTIO_IOMMU
>> +	bool "Virtio IOMMU driver"
>> +	depends on VIRTIO=y
>> +	select IOMMU_API
>> +	select INTERVAL_TREE
>> +	select ARM_DMA_USE_IOMMU if ARM
>> +	help
>> +	  Para-virtualised IOMMU driver with virtio.
>> +
>> +	  Say Y here if you intend to run this kernel as a guest.
>> +
> 
> Given it is arm specific right now, shouldn't this depend on ARM?
> E.g. there's a hack for x86 right now.

Sure, I'll make it depend on ARM64 for now

[..]
>> +static int viommu_probe(struct virtio_device *vdev)
>> +{
>> +	struct device *parent_dev = vdev->dev.parent;
>> +	struct viommu_dev *viommu = NULL;
>> +	struct device *dev = &vdev->dev;
>> +	u64 input_start = 0;
>> +	u64 input_end = -1UL;
>> +	int ret;
>> +
>> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
>> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> 
> Why bother with a feature bit for this then btw?

We'll need a new feature bit for sharing page tables with the hardware,
because they require different requests (attach_table/invalidate instead
of map/unmap.) A future device supporting page table sharing won't
necessarily need to support map/unmap.
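
For illustration only, the probe check could then accept either capability.
VIRTIO_IOMMU_F_ATTACH_TABLE is a made-up name, not something defined by this
series:

	#include <linux/virtio_config.h>

	/* Sketch only: VIRTIO_IOMMU_F_ATTACH_TABLE is hypothetical */
	static bool viommu_supported(struct virtio_device *vdev)
	{
		if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return false;

		return virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP) ||
		       virtio_has_feature(vdev, VIRTIO_IOMMU_F_ATTACH_TABLE);
	}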

Thanks,
Jean
Jean-Philippe Brucker Nov. 27, 2018, 5:58 p.m. UTC | #7
On 23/11/2018 21:48, Michael S. Tsirkin wrote:
>> +struct virtio_iommu_config {
>> +	/* Supported page sizes */
>> +	__u64					page_size_mask;
>> +	/* Supported IOVA range */
>> +	struct virtio_iommu_range		input_range;
>> +	/* Max domain ID size */
>> +	__u8					domain_bits;
>> +	__u8					padding[3];
> 
> Not enough padding here it seems. Structure is 8 byte
> aligned on 64 bit systems.

The next field (probe_size) is 4 bytes, so the alignment ends up fine.
That field is introduced in patch 6, maybe I should move it here?
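
For illustration, with the 4-byte probe_size field folded in here (shown as
__u32; patch 6 defines the actual field), the layout would be:

	struct virtio_iommu_config {
		__u64				page_size_mask;	/* offset 0  */
		struct virtio_iommu_range	input_range;	/* offset 8  */
		__u8				domain_bits;	/* offset 24 */
		__u8				padding[3];	/* offset 25 */
		__u32				probe_size;	/* offset 28 */
	};						/* total 32 bytes */

so the structure size stays a multiple of 8 and the 3-byte padding after
domain_bits is sufficient.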

Thanks,
Jean
Michael S. Tsirkin Nov. 27, 2018, 6:04 p.m. UTC | #8
On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
> >> +/*
> >> + * __viommu_sync_req - Complete all in-flight requests
> >> + *
> >> + * Wait for all added requests to complete. When this function returns, all
> >> + * requests that were in-flight at the time of the call have completed.
> >> + */
> >> +static int __viommu_sync_req(struct viommu_dev *viommu)
> >> +{
> >> +	int ret = 0;
> >> +	unsigned int len;
> >> +	size_t write_len;
> >> +	struct viommu_request *req;
> >> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >> +
> >> +	assert_spin_locked(&viommu->request_lock);
> >> +
> >> +	virtqueue_kick(vq);
> >> +
> >> +	while (!list_empty(&viommu->requests)) {
> >> +		len = 0;
> >> +		req = virtqueue_get_buf(vq, &len);
> >> +		if (!req)
> >> +			continue;
> >> +
> >> +		if (!len)
> >> +			viommu_set_req_status(req->buf, req->len,
> >> +					      VIRTIO_IOMMU_S_IOERR);
> >> +
> >> +		write_len = req->len - req->write_offset;
> >> +		if (req->writeback && len == write_len)
> >> +			memcpy(req->writeback, req->buf + req->write_offset,
> >> +			       write_len);
> >> +
> >> +		list_del(&req->list);
> >> +		kfree(req);
> >> +	}
> > 
> > I didn't notice this in the past but it seems this will spin
> > with interrupts disabled until host handles the request.
> > Please do not do this - host execution can be another
> > task that needs the same host CPU. This will then disable
> > interrupts for a very very long time.
> 
> In the guest yes, but that doesn't prevent the host from running another
> task right?

Doesn't prevent it but it will delay it significantly
until scheduler decides to kick the VCPU task out.

> My tests run fine when QEMU is bound to a single CPU, even
> though vcpu and viommu run in different threads
> 
> > What to do then? Queue in software and wake up task.
> 
> Unfortunately I can't do anything here, because IOMMU drivers can't
> sleep in the iommu_map() or iommu_unmap() path.
>
> The problem is the same
> for all IOMMU drivers. That's because the DMA API allows drivers to call
> some functions with interrupts disabled. For example
> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
> dma_unmap_single() to be called in interrupt context.

In fact I don't really understand how it's supposed to
work at all: you only sync when ring is full.
So host may not have seen your map request if ring
is not full.
Why is it safe to use the address with a device then?


> > As kick is a vm exit, kicking with interrupts disabled is discouraged too:
> > better to prepare for the kick, enable interrupts, then kick.
> 
> That was on my list of things to look at, because it could relax
> things for device drivers that don't call us with interrupts disabled. I
> just tried it and I can see some performance improvement (7% and 4% on
> tcp_stream and tcp_maerts respectively, +/-2.5%).
> 
> Since it's an optimization I'll leave it for later (ACPI and module
> support is higher on my list). The resulting change is complicated
> because we now need to deal with threads adding new requests while
> sync() is running. With my current prototype one thread could end up
> staying in sync() while other threads add new async requests, so I need
> to find a way to bound it.
> 
> Thanks,
> Jean
Michael S. Tsirkin Nov. 27, 2018, 6:10 p.m. UTC | #9
On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
> On 23/11/2018 21:56, Michael S. Tsirkin wrote:
> >> +config VIRTIO_IOMMU
> >> +	bool "Virtio IOMMU driver"
> >> +	depends on VIRTIO=y
> >> +	select IOMMU_API
> >> +	select INTERVAL_TREE
> >> +	select ARM_DMA_USE_IOMMU if ARM
> >> +	help
> >> +	  Para-virtualised IOMMU driver with virtio.
> >> +
> >> +	  Say Y here if you intend to run this kernel as a guest.
> >> +
> > 
> > Given it is arm specific right now, shouldn't this depend on ARM?
> > E.g. there's a hack for x86 right now.
> 
> Sure, I'll make it depend on ARM64 for now
> 
> [..]
> >> +static int viommu_probe(struct virtio_device *vdev)
> >> +{
> >> +	struct device *parent_dev = vdev->dev.parent;
> >> +	struct viommu_dev *viommu = NULL;
> >> +	struct device *dev = &vdev->dev;
> >> +	u64 input_start = 0;
> >> +	u64 input_end = -1UL;
> >> +	int ret;
> >> +
> >> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> >> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> > 
> > Why bother with a feature bit for this then btw?
> 
> We'll need a new feature bit for sharing page tables with the hardware,
> because they require different requests (attach_table/invalidate instead
> of map/unmap.) A future device supporting page table sharing won't
> necessarily need to support map/unmap.
> 
> Thanks,
> Jean

I don't see virtio iommu being extended to support ARM specific
requests. This just won't scale, too many different
descriptor formats out there.

If you want to go that way down the road, you should avoid
virtio iommu, instead emulate and share code with the ARM SMMU (probably
with a different vendor id so you can implement the
report on map for devices without PRI).

Others on the TC might feel differently.

If someone's looking into adding virtio iommu support in hardware,
that's a different matter. Which is it?
Michael S. Tsirkin Nov. 27, 2018, 6:10 p.m. UTC | #10
On Tue, Nov 27, 2018 at 05:58:18PM +0000, Jean-Philippe Brucker wrote:
> On 23/11/2018 21:48, Michael S. Tsirkin wrote:
> >> +struct virtio_iommu_config {
> >> +	/* Supported page sizes */
> >> +	__u64					page_size_mask;
> >> +	/* Supported IOVA range */
> >> +	struct virtio_iommu_range		input_range;
> >> +	/* Max domain ID size */
> >> +	__u8					domain_bits;
> >> +	__u8					padding[3];
> > 
> > Not enough padding here it seems. Structure is 8 byte
> > aligned on 64 bit systems.
> 
> The next field (probe_size) is 4 bytes, so the alignment ends up fine.
> That field is introduced in patch 6, maybe I should move it here?
> 
> Thanks,
> Jean

Sounds like a good idea.
Jean-Philippe Brucker Nov. 27, 2018, 6:10 p.m. UTC | #11
On 27/11/2018 18:04, Michael S. Tsirkin wrote:
> On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
>> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
>>>> +/*
>>>> + * __viommu_sync_req - Complete all in-flight requests
>>>> + *
>>>> + * Wait for all added requests to complete. When this function returns, all
>>>> + * requests that were in-flight at the time of the call have completed.
>>>> + */
>>>> +static int __viommu_sync_req(struct viommu_dev *viommu)
>>>> +{
>>>> +	int ret = 0;
>>>> +	unsigned int len;
>>>> +	size_t write_len;
>>>> +	struct viommu_request *req;
>>>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>>>> +
>>>> +	assert_spin_locked(&viommu->request_lock);
>>>> +
>>>> +	virtqueue_kick(vq);
>>>> +
>>>> +	while (!list_empty(&viommu->requests)) {
>>>> +		len = 0;
>>>> +		req = virtqueue_get_buf(vq, &len);
>>>> +		if (!req)
>>>> +			continue;
>>>> +
>>>> +		if (!len)
>>>> +			viommu_set_req_status(req->buf, req->len,
>>>> +					      VIRTIO_IOMMU_S_IOERR);
>>>> +
>>>> +		write_len = req->len - req->write_offset;
>>>> +		if (req->writeback && len == write_len)
>>>> +			memcpy(req->writeback, req->buf + req->write_offset,
>>>> +			       write_len);
>>>> +
>>>> +		list_del(&req->list);
>>>> +		kfree(req);
>>>> +	}
>>>
>>> I didn't notice this in the past but it seems this will spin
>>> with interrupts disabled until host handles the request.
>>> Please do not do this - host execution can be another
>>> task that needs the same host CPU. This will then disable
>>> interrupts for a very very long time.
>>
>> In the guest yes, but that doesn't prevent the host from running another
>> task right?
> 
> Doesn't prevent it but it will delay it significantly
> until scheduler decides to kick the VCPU task out.
> 
>> My tests run fine when QEMU is bound to a single CPU, even
>> though vcpu and viommu run in different threads
>>
>>> What to do then? Queue in software and wake up task.
>>
>> Unfortunately I can't do anything here, because IOMMU drivers can't
>> sleep in the iommu_map() or iommu_unmap() path.
>>
>> The problem is the same
>> for all IOMMU drivers. That's because the DMA API allows drivers to call
>> some functions with interrupts disabled. For example
>> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
>> dma_unmap_single() to be called in interrupt context.
> 
> In fact I don't really understand how it's supposed to
> work at all: you only sync when ring is full.
> So host may not have seen your map request if ring
> is not full.
> Why is it safe to use the address with a device then?

viommu_map() calls viommu_send_req_sync(), which does the sync
immediately after adding the MAP request.

Thanks,
Jean
Michael S. Tsirkin Nov. 27, 2018, 6:13 p.m. UTC | #12
On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
> > I didn't notice this in the past but it seems this will spin
> > with interrupts disabled until host handles the request.
> > Please do not do this - host execution can be another
> > task that needs the same host CPU. This will then disable
> > interrupts for a very very long time.
> 
> In the guest yes, but that doesn't prevent the host from running another
> task right? My tests run fine when QEMU is bound to a single CPU, even
> though vcpu and viommu run in different threads

So a kind of solution is to add a config space field for sync.
That at least can give host a hint that yes, vcpu
is stopped now and it should do something else.
Not ideal but better than polling VQ forever.
Michael S. Tsirkin Nov. 27, 2018, 6:53 p.m. UTC | #13
On Tue, Nov 27, 2018 at 06:10:46PM +0000, Jean-Philippe Brucker wrote:
> On 27/11/2018 18:04, Michael S. Tsirkin wrote:
> > On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
> >> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
> >>>> +/*
> >>>> + * __viommu_sync_req - Complete all in-flight requests
> >>>> + *
> >>>> + * Wait for all added requests to complete. When this function returns, all
> >>>> + * requests that were in-flight at the time of the call have completed.
> >>>> + */
> >>>> +static int __viommu_sync_req(struct viommu_dev *viommu)
> >>>> +{
> >>>> +	int ret = 0;
> >>>> +	unsigned int len;
> >>>> +	size_t write_len;
> >>>> +	struct viommu_request *req;
> >>>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >>>> +
> >>>> +	assert_spin_locked(&viommu->request_lock);
> >>>> +
> >>>> +	virtqueue_kick(vq);
> >>>> +
> >>>> +	while (!list_empty(&viommu->requests)) {
> >>>> +		len = 0;
> >>>> +		req = virtqueue_get_buf(vq, &len);
> >>>> +		if (!req)
> >>>> +			continue;
> >>>> +
> >>>> +		if (!len)
> >>>> +			viommu_set_req_status(req->buf, req->len,
> >>>> +					      VIRTIO_IOMMU_S_IOERR);
> >>>> +
> >>>> +		write_len = req->len - req->write_offset;
> >>>> +		if (req->writeback && len == write_len)
> >>>> +			memcpy(req->writeback, req->buf + req->write_offset,
> >>>> +			       write_len);
> >>>> +
> >>>> +		list_del(&req->list);
> >>>> +		kfree(req);
> >>>> +	}
> >>>
> >>> I didn't notice this in the past but it seems this will spin
> >>> with interrupts disabled until host handles the request.
> >>> Please do not do this - host execution can be another
> >>> task that needs the same host CPU. This will then disable
> >>> interrupts for a very very long time.
> >>
> >> In the guest yes, but that doesn't prevent the host from running another
> >> task right?
> > 
> > Doesn't prevent it but it will delay it significantly
> > until scheduler decides to kick the VCPU task out.
> > 
> >> My tests run fine when QEMU is bound to a single CPU, even
> >> though vcpu and viommu run in different threads
> >>
> >>> What to do then? Queue in software and wake up task.
> >>
> >> Unfortunately I can't do anything here, because IOMMU drivers can't
> >> sleep in the iommu_map() or iommu_unmap() path.
> >>
> >> The problem is the same
> >> for all IOMMU drivers. That's because the DMA API allows drivers to call
> >> some functions with interrupts disabled. For example
> >> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
> >> dma_unmap_single() to be called in interrupt context.
> > 
> > In fact I don't really understand how it's supposed to
> > work at all: you only sync when ring is full.
> > So host may not have seen your map request if ring
> > is not full.
> > Why is it safe to use the address with a device then?
> 
> viommu_map() calls viommu_send_req_sync(), which does the sync
> immediately after adding the MAP request.
> 
> Thanks,
> Jean

I see. So it happens on every request. Maybe you should clear
event index then. This way if exits are disabled you know that
host is processing the ring. Event index is good for when
you don't care when it will be processed, you just want
to reduce number of exits as much as possible.
Jean-Philippe Brucker Dec. 7, 2018, 6:52 p.m. UTC | #14
Sorry for the delay, I wanted to do a little more performance analysis
before continuing.

On 27/11/2018 18:10, Michael S. Tsirkin wrote:
> On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
>>>> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
>>>> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
>>>
>>> Why bother with a feature bit for this then btw?
>>
>> We'll need a new feature bit for sharing page tables with the hardware,
>> because they require different requests (attach_table/invalidate instead
>> of map/unmap.) A future device supporting page table sharing won't
>> necessarily need to support map/unmap.
>>
> I don't see virtio iommu being extended to support ARM specific
> requests. This just won't scale, too many different
> descriptor formats out there.

They aren't really ARM specific requests. The two new requests are
ATTACH_TABLE and INVALIDATE, which would be used by x86 IOMMUs as well.

Sharing CPU address space with the HW IOMMU (SVM) has been in the scope
of virtio-iommu since the first RFC, and I've been working with that
extension in mind since the beginning. As an example you can have a look
at my current draft for this [1], which is inspired from the VFIO work
we've been doing with Intel.

The negotiation phase inevitably requires vendor-specific fields in the
descriptors - host tells which formats are supported, guest chooses a
format and attaches page tables. But invalidation and fault reporting
descriptors are fairly generic.

> If you want to go that way down the road, you should avoid
> virtio iommu, instead emulate and share code with the ARM SMMU (probably
> with a different vendor id so you can implement the
> report on map for devices without PRI).

vSMMU has to stay in userspace though. The main reason we're proposing
virtio-iommu is that emulating every possible vIOMMU model in the kernel
would be unmaintainable. With virtio-iommu we can process the fast path
in the host kernel, through vhost-iommu, and do the heavy lifting in
userspace. As said above, I'm trying to keep the fast path for
virtio-iommu generic.


More notes on what I consider to be the fast path, and comparison with
vSMMU:

(1) The primary use-case we have in mind for vIOMMU is something like
DPDK in the guest, assigning a hardware device to guest userspace. DPDK
maps a large amount of memory statically, to be used by a pass-through
device. For this case I don't think we care about vIOMMU performance.
Setup and teardown need to be reasonably fast, sure, but the MAP/UNMAP
requests don't have to be optimal.


(2) If the assigned device is owned by the guest kernel, then mappings
are dynamic and require dma_map/unmap() to be fast, but there generally
is no need for a vIOMMU, since device and drivers are trusted by the
guest kernel. Even when the user does enable a vIOMMU for this case
(allowing to over-commit guest memory, which needs to be pinned
otherwise), we generally play tricks like lazy TLBI (non-strict mode) to
make it faster. Here device and drivers are trusted, therefore the
vulnerability window of lazy mode isn't a concern.

If the reason to enable the vIOMMU is over-committing guest memory
however, you can't use nested translation because it requires pinning
the second-level tables. For this case performance matters a bit,
because your invalidate-on-map needs to be fast, even if you enable lazy
mode and only receive inval-on-unmap every 10ms. It won't ever be as
fast as nested translation, though. For this case I think vSMMU+Caching
Mode and userspace virtio-iommu with MAP/UNMAP would perform similarly
(given page-sized payloads), because the pagetable walk doesn't add a
lot of overhead compared to the context switch. But given the results
below, vhost-iommu would be faster than vSMMU+CM.


(3) Then there is SVM. For SVM, any destructive change to the process
address space requires a synchronous invalidation command to the
hardware (at least when using PCI ATS). Given that SVM is based on page
faults, fault reporting from host to guest also needs to be fast, as
well as fault response from guest to host.

I think this is where performance matters the most. To get a feel of the
advantage we get with virtio-iommu, I compared the vSMMU page-table
sharing implementation [2] and vhost-iommu + VFIO with page table
sharing (based on Tomasz Nowicki's vhost-iommu prototype). That's on a
ThunderX2 with a 10Gb NIC assigned to the guest kernel, which
corresponds to case (2) above, with nesting page tables and without the
lazy mode. The host's only job is forwarding invalidation to the HW SMMU.

vhost-iommu performed on average 1.8x and 5.5x better than vSMMU on
netperf TCP_STREAM and TCP_MAERTS respectively (~200 samples). I think
this can be further optimized (that was still polling under the vq
lock), and unlike vSMMU, virtio-iommu offers the possibility of
multi-queue for improved scalability. In addition, the guest will need
to send both TLB and ATC invalidations with vSMMU, but virtio-iommu
allows to multiplex those, and to invalidate ranges. Similarly for fault
injection, having the ability to report page faults to the guest from
the host kernel should be significantly faster than having to go to
userspace and back to the kernel.


(4) Virtio and vhost endpoints weren't really a priority for the base
virtio-iommu device, we were looking mainly at device pass-through. I
have optimizations in mind for this, although a lot of them are based on
page tables, not MAP/UNMAP requests. But just getting the vIOMMU closer
to vhost devices, avoiding the trip to userspace through vhost-tlb,
should already improve things.

The important difference when DMA is done by software is that you don't
need to mirror all mappings into the HW IOMMU - you don't need
inval-on-map. The endpoint can ask the vIOMMU for mappings when it needs
them, like vhost-iotlb does for example. So the MAP/UNMAP interface of
virtio-iommu performs poorly for emulated/PV endpoints compared to an
emulated IOMMU, since it requires three context switches for DMA
(MAP/DMA/UNMAP) between host and guest, rather than two (DMA/INVAL).
There is a feature I call "posted MAP", that avoids the kick on MAP and
instead lets the device fetch the MAP request on TLB miss, but I haven't
spent enough time experimenting with this.

> Others on the TC might feel differently.
> 
> If someone's looking into adding virtio iommu support in hardware,
> that's a different matter. Which is it?

I'm not aware of anything like that, and suspect that no one would
consider it until virtio-iommu is more widely adopted.

Thanks,
Jean


[1] Diff between current spec and page table sharing draft
    (Very rough, missing page fault support and I'd like to rework the
     PASID model a bit, but see the table descriptors on pp. 24-26, which
     cover both Arm SMMUv2 and SMMUv3.)

http://jpbrucker.net/virtio-iommu/spec-table/diffs/virtio-iommu-pdf-diff-v0.9-v0.10.dev03.pdf

[2] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration
    https://www.mail-archive.com/qemu-devel@nongnu.org/msg562369.html
Jean-Philippe Brucker Dec. 10, 2018, 3:06 p.m. UTC | #15
On 27/11/2018 18:53, Michael S. Tsirkin wrote:
> On Tue, Nov 27, 2018 at 06:10:46PM +0000, Jean-Philippe Brucker wrote:
>> On 27/11/2018 18:04, Michael S. Tsirkin wrote:
>>> On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
>>>> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
>>>>>> +/*
>>>>>> + * __viommu_sync_req - Complete all in-flight requests
>>>>>> + *
>>>>>> + * Wait for all added requests to complete. When this function returns, all
>>>>>> + * requests that were in-flight at the time of the call have completed.
>>>>>> + */
>>>>>> +static int __viommu_sync_req(struct viommu_dev *viommu)
>>>>>> +{
>>>>>> +	int ret = 0;
>>>>>> +	unsigned int len;
>>>>>> +	size_t write_len;
>>>>>> +	struct viommu_request *req;
>>>>>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>>>>>> +
>>>>>> +	assert_spin_locked(&viommu->request_lock);
>>>>>> +
>>>>>> +	virtqueue_kick(vq);
>>>>>> +
>>>>>> +	while (!list_empty(&viommu->requests)) {
>>>>>> +		len = 0;
>>>>>> +		req = virtqueue_get_buf(vq, &len);
>>>>>> +		if (!req)
>>>>>> +			continue;
>>>>>> +
>>>>>> +		if (!len)
>>>>>> +			viommu_set_req_status(req->buf, req->len,
>>>>>> +					      VIRTIO_IOMMU_S_IOERR);
>>>>>> +
>>>>>> +		write_len = req->len - req->write_offset;
>>>>>> +		if (req->writeback && len == write_len)
>>>>>> +			memcpy(req->writeback, req->buf + req->write_offset,
>>>>>> +			       write_len);
>>>>>> +
>>>>>> +		list_del(&req->list);
>>>>>> +		kfree(req);
>>>>>> +	}
>>>>>
>>>>> I didn't notice this in the past but it seems this will spin
>>>>> with interrupts disabled until host handles the request.
>>>>> Please do not do this - host execution can be another
>>>>> task that needs the same host CPU. This will then disable
>>>>> interrupts for a very very long time.
>>>>
>>>> In the guest yes, but that doesn't prevent the host from running another
>>>> task right?
>>>
>>> Doesn't prevent it but it will delay it significantly
>>> until scheduler decides to kick the VCPU task out.
>>>
>>>> My tests run fine when QEMU is bound to a single CPU, even
>>>> though vcpu and viommu run in different threads
>>>>
>>>>> What to do then? Queue in software and wake up task.
>>>>
>>>> Unfortunately I can't do anything here, because IOMMU drivers can't
>>>> sleep in the iommu_map() or iommu_unmap() path.
>>>>
>>>> The problem is the same
>>>> for all IOMMU drivers. That's because the DMA API allows drivers to call
>>>> some functions with interrupts disabled. For example
>>>> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
>>>> dma_unmap_single() to be called in interrupt context.
>>>
>>> In fact I don't really understand how it's supposed to
>>> work at all: you only sync when ring is full.
>>> So host may not have seen your map request if ring
>>> is not full.
>>> Why is it safe to use the address with a device then?
>>
>> viommu_map() calls viommu_send_req_sync(), which does the sync
>> immediately after adding the MAP request.
>>
>> Thanks,
>> Jean
> 
> I see. So it happens on every request. Maybe you should clear
> event index then. This way if exits are disabled you know that
> host is processing the ring. Event index is good for when
> you don't care when it will be processed, you just want
> to reduce number of exits as much as possible.
> 

I think that's already the case: since we don't attach a callback to the
request queue, VRING_AVAIL_F_NO_INTERRUPT is set in avail_flags_shadow,
which causes the used event index to stay clear.

Thanks,
Jean
Michael S. Tsirkin Dec. 10, 2018, 10:53 p.m. UTC | #16
On Mon, Dec 10, 2018 at 03:06:47PM +0000, Jean-Philippe Brucker wrote:
> On 27/11/2018 18:53, Michael S. Tsirkin wrote:
> > On Tue, Nov 27, 2018 at 06:10:46PM +0000, Jean-Philippe Brucker wrote:
> >> On 27/11/2018 18:04, Michael S. Tsirkin wrote:
> >>> On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
> >>>> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
> >>>>>> +/*
> >>>>>> + * __viommu_sync_req - Complete all in-flight requests
> >>>>>> + *
> >>>>>> + * Wait for all added requests to complete. When this function returns, all
> >>>>>> + * requests that were in-flight at the time of the call have completed.
> >>>>>> + */
> >>>>>> +static int __viommu_sync_req(struct viommu_dev *viommu)
> >>>>>> +{
> >>>>>> +	int ret = 0;
> >>>>>> +	unsigned int len;
> >>>>>> +	size_t write_len;
> >>>>>> +	struct viommu_request *req;
> >>>>>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
> >>>>>> +
> >>>>>> +	assert_spin_locked(&viommu->request_lock);
> >>>>>> +
> >>>>>> +	virtqueue_kick(vq);
> >>>>>> +
> >>>>>> +	while (!list_empty(&viommu->requests)) {
> >>>>>> +		len = 0;
> >>>>>> +		req = virtqueue_get_buf(vq, &len);
> >>>>>> +		if (!req)
> >>>>>> +			continue;
> >>>>>> +
> >>>>>> +		if (!len)
> >>>>>> +			viommu_set_req_status(req->buf, req->len,
> >>>>>> +					      VIRTIO_IOMMU_S_IOERR);
> >>>>>> +
> >>>>>> +		write_len = req->len - req->write_offset;
> >>>>>> +		if (req->writeback && len == write_len)
> >>>>>> +			memcpy(req->writeback, req->buf + req->write_offset,
> >>>>>> +			       write_len);
> >>>>>> +
> >>>>>> +		list_del(&req->list);
> >>>>>> +		kfree(req);
> >>>>>> +	}
> >>>>>
> >>>>> I didn't notice this in the past but it seems this will spin
> >>>>> with interrupts disabled until host handles the request.
> >>>>> Please do not do this - host execution can be another
> >>>>> task that needs the same host CPU. This will then disable
> >>>>> interrupts for a very very long time.
> >>>>
> >>>> In the guest yes, but that doesn't prevent the host from running another
> >>>> task right?
> >>>
> >>> Doesn't prevent it but it will delay it significantly
> >>> until scheduler decides to kick the VCPU task out.
> >>>
> >>>> My tests run fine when QEMU is bound to a single CPU, even
> >>>> though vcpu and viommu run in different threads
> >>>>
> >>>>> What to do then? Queue in software and wake up task.
> >>>>
> >>>> Unfortunately I can't do anything here, because IOMMU drivers can't
> >>>> sleep in the iommu_map() or iommu_unmap() path.
> >>>>
> >>>> The problem is the same
> >>>> for all IOMMU drivers. That's because the DMA API allows drivers to call
> >>>> some functions with interrupts disabled. For example
> >>>> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
> >>>> dma_unmap_single() to be called in interrupt context.
> >>>
> >>> In fact I don't really understand how it's supposed to
> >>> work at all: you only sync when ring is full.
> >>> So host may not have seen your map request if ring
> >>> is not full.
> >>> Why is it safe to use the address with a device then?
> >>
> >> viommu_map() calls viommu_send_req_sync(), which does the sync
> >> immediately after adding the MAP request.
> >>
> >> Thanks,
> >> Jean
> > 
> > I see. So it happens on every request. Maybe you should clear
> > event index then. This way if exits are disabled you know that
> > host is processing the ring. Event index is good for when
> > you don't care when it will be processed, you just want
> > to reduce number of exits as much as possible.
> > 
> 
> I think that's already the case: since we don't attach a callback to the
> request queue, VRING_AVAIL_F_NO_INTERRUPT is set in avail_flags_shadow,
> which causes the used event index to stay clear.
> 
> Thanks,
> Jean

VRING_AVAIL_F_NO_INTERRUPT has no effect when the event index
feature has been negotiated. In any case, it also does not
affect kick notifications from guest - it affects
device interrupts.
Jean-Philippe Brucker Dec. 11, 2018, 4:29 p.m. UTC | #17
On 10/12/2018 22:53, Michael S. Tsirkin wrote:
> On Mon, Dec 10, 2018 at 03:06:47PM +0000, Jean-Philippe Brucker wrote:
>> On 27/11/2018 18:53, Michael S. Tsirkin wrote:
>>> On Tue, Nov 27, 2018 at 06:10:46PM +0000, Jean-Philippe Brucker wrote:
>>>> On 27/11/2018 18:04, Michael S. Tsirkin wrote:
>>>>> On Tue, Nov 27, 2018 at 05:50:50PM +0000, Jean-Philippe Brucker wrote:
>>>>>> On 23/11/2018 22:02, Michael S. Tsirkin wrote:
>>>>>>>> +/*
>>>>>>>> + * __viommu_sync_req - Complete all in-flight requests
>>>>>>>> + *
>>>>>>>> + * Wait for all added requests to complete. When this function returns, all
>>>>>>>> + * requests that were in-flight at the time of the call have completed.
>>>>>>>> + */
>>>>>>>> +static int __viommu_sync_req(struct viommu_dev *viommu)
>>>>>>>> +{
>>>>>>>> +	int ret = 0;
>>>>>>>> +	unsigned int len;
>>>>>>>> +	size_t write_len;
>>>>>>>> +	struct viommu_request *req;
>>>>>>>> +	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
>>>>>>>> +
>>>>>>>> +	assert_spin_locked(&viommu->request_lock);
>>>>>>>> +
>>>>>>>> +	virtqueue_kick(vq);
>>>>>>>> +
>>>>>>>> +	while (!list_empty(&viommu->requests)) {
>>>>>>>> +		len = 0;
>>>>>>>> +		req = virtqueue_get_buf(vq, &len);
>>>>>>>> +		if (!req)
>>>>>>>> +			continue;
>>>>>>>> +
>>>>>>>> +		if (!len)
>>>>>>>> +			viommu_set_req_status(req->buf, req->len,
>>>>>>>> +					      VIRTIO_IOMMU_S_IOERR);
>>>>>>>> +
>>>>>>>> +		write_len = req->len - req->write_offset;
>>>>>>>> +		if (req->writeback && len == write_len)
>>>>>>>> +			memcpy(req->writeback, req->buf + req->write_offset,
>>>>>>>> +			       write_len);
>>>>>>>> +
>>>>>>>> +		list_del(&req->list);
>>>>>>>> +		kfree(req);
>>>>>>>> +	}
>>>>>>>
>>>>>>> I didn't notice this in the past but it seems this will spin
>>>>>>> with interrupts disabled until host handles the request.
>>>>>>> Please do not do this - host execution can be another
>>>>>>> task that needs the same host CPU. This will then disable
>>>>>>> interrupts for a very very long time.
>>>>>>
>>>>>> In the guest yes, but that doesn't prevent the host from running another
>>>>>> task right?
>>>>>
>>>>> Doesn't prevent it but it will delay it significantly
>>>>> until scheduler decides to kick the VCPU task out.
>>>>>
>>>>>> My tests run fine when QEMU is bound to a single CPU, even
>>>>>> though vcpu and viommu run in different threads
>>>>>>
>>>>>>> What to do then? Queue in software and wake up task.
>>>>>>
>>>>>> Unfortunately I can't do anything here, because IOMMU drivers can't
>>>>>> sleep in the iommu_map() or iommu_unmap() path.
>>>>>>
>>>>>> The problem is the same
>>>>>> for all IOMMU drivers. That's because the DMA API allows drivers to call
>>>>>> some functions with interrupts disabled. For example
>>>>>> Documentation/DMA-API-HOWTO.txt allows dma_alloc_coherent() and
>>>>>> dma_unmap_single() to be called in interrupt context.
>>>>>
>>>>> In fact I don't really understand how it's supposed to
>>>>> work at all: you only sync when ring is full.
>>>>> So host may not have seen your map request if ring
>>>>> is not full.
>>>>> Why is it safe to use the address with a device then?
>>>>
>>>> viommu_map() calls viommu_send_req_sync(), which does the sync
>>>> immediately after adding the MAP request.
>>>>
>>>> Thanks,
>>>> Jean
>>>
>>> I see. So it happens on every request. Maybe you should clear
>>> event index then. This way if exits are disabled you know that
>>> host is processing the ring. Event index is good for when
>>> you don't care when it will be processed, you just want
>>> to reduce number of exits as much as possible.
>>>
>>
>> I think that's already the case: since we don't attach a callback to the
>> request queue, VRING_AVAIL_F_NO_INTERRUPT is set in avail_flags_shadow,
>> which causes the used event index to stay clear.
>>
>> Thanks,
>> Jean
> 
> VRING_AVAIL_F_NO_INTERRUPT has no effect when the event index
> feature has been negotiated. In any case, it also does not
> affect kick notifications from guest - it affects
> device interrupts.

Ok, I thought we were talking about the used event. Then this is a
device-side optimization right?

In QEMU, disabling notifications while processing the queue didn't show
any difference in netperf. Kvmtool already masks events when processing
the ring - if the host is still handling requests while the guest adds
more, then the avail event is at least one behind the new avail index,
and the guest doesn't kick the host.
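
For reference, this is essentially the check the guest applies before kicking,
a simplified sketch around vring_need_event() from
include/uapi/linux/virtio_ring.h:

	#include <linux/types.h>
	#include <linux/virtio_ring.h>

	/*
	 * Kick only if the avail event published by the device falls within
	 * the batch of descriptors just added, i.e. the device asked to be
	 * notified at an index the driver has now crossed.
	 */
	static bool guest_should_kick(u16 avail_event, u16 old_avail,
				      u16 new_avail)
	{
		return vring_need_event(avail_event, new_avail, old_avail);
	}

When the device is still processing, its published avail event lags behind
the batch just added, so the kick is skipped.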

Anyway I think we can look at optimizations later, since I don't think
there is any trivial one that we can squash into the initial driver.
I'll resend this series with the Kconfig and header change.

Thanks,
Jean
Michael S. Tsirkin Dec. 12, 2018, 2:56 p.m. UTC | #18
On Fri, Dec 07, 2018 at 06:52:31PM +0000, Jean-Philippe Brucker wrote:
> Sorry for the delay, I wanted to do a little more performance analysis
> before continuing.
> 
> On 27/11/2018 18:10, Michael S. Tsirkin wrote:
> > On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
> >>>> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
> >>>> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
> >>>
> >>> Why bother with a feature bit for this then btw?
> >>
> >> We'll need a new feature bit for sharing page tables with the hardware,
> >> because they require different requests (attach_table/invalidate instead
> >> of map/unmap.) A future device supporting page table sharing won't
> >> necessarily need to support map/unmap.
> >>
> > I don't see virtio iommu being extended to support ARM specific
> > requests. This just won't scale, too many different
> > descriptor formats out there.
> 
> They aren't really ARM specific requests. The two new requests are
> ATTACH_TABLE and INVALIDATE, which would be used by x86 IOMMUs as well.
> 
> Sharing CPU address space with the HW IOMMU (SVM) has been in the scope
> of virtio-iommu since the first RFC, and I've been working with that
> extension in mind since the beginning. As an example you can have a look
> at my current draft for this [1], which is inspired from the VFIO work
> we've been doing with Intel.
> 
> The negotiation phase inevitably requires vendor-specific fields in the
> descriptors - host tells which formats are supported, guest chooses a
> format and attaches page tables. But invalidation and fault reporting
> descriptors are fairly generic.

We need to tread carefully here. People expect that if a user does
lspci and sees a virtio device then it's reasonably portable.

> > If you want to go that way down the road, you should avoid
> > virtio iommu, instead emulate and share code with the ARM SMMU (probably
> > with a different vendor id so you can implement the
> > report on map for devices without PRI).
> 
> vSMMU has to stay in userspace though. The main reason we're proposing
> virtio-iommu is that emulating every possible vIOMMU model in the kernel
> would be unmaintainable. With virtio-iommu we can process the fast path
> in the host kernel, through vhost-iommu, and do the heavy lifting in
> userspace.

Interesting.

> As said above, I'm trying to keep the fast path for
> virtio-iommu generic.
> 
> More notes on what I consider to be the fast path, and comparison with
> vSMMU:
> 
> (1) The primary use-case we have in mind for vIOMMU is something like
> DPDK in the guest, assigning a hardware device to guest userspace. DPDK
> maps a large amount of memory statically, to be used by a pass-through
> device. For this case I don't think we care about vIOMMU performance.
> Setup and teardown need to be reasonably fast, sure, but the MAP/UNMAP
> requests don't have to be optimal.
> 
> 
> (2) If the assigned device is owned by the guest kernel, then mappings
> are dynamic and require dma_map/unmap() to be fast, but there generally
> is no need for a vIOMMU, since device and drivers are trusted by the
> guest kernel. Even when the user does enable a vIOMMU for this case
> (allowing to over-commit guest memory, which needs to be pinned
> otherwise),

BTW that's in theory; in practice it doesn't really work.

> we generally play tricks like lazy TLBI (non-strict mode) to
> make it faster.

Simple lazy TLB for guest/userspace drivers would be a big no no.
You need something smarter.

> Here device and drivers are trusted, therefore the
> vulnerability window of lazy mode isn't a concern.
> 
> If the reason to enable the vIOMMU is over-committing guest memory
> however, you can't use nested translation because it requires pinning
> the second-level tables. For this case performance matters a bit,
> because your invalidate-on-map needs to be fast, even if you enable lazy
> mode and only receive inval-on-unmap every 10ms. It won't ever be as
> fast as nested translation, though. For this case I think vSMMU+Caching
> Mode and userspace virtio-iommu with MAP/UNMAP would perform similarly
> (given page-sized payloads), because the pagetable walk doesn't add a
> lot of overhead compared to the context switch. But given the results
> below, vhost-iommu would be faster than vSMMU+CM.
> 
> 
> (3) Then there is SVM. For SVM, any destructive change to the process
> address space requires a synchronous invalidation command to the
> hardware (at least when using PCI ATS). Given that SVM is based on page
> faults, fault reporting from host to guest also needs to be fast, as
> well as fault response from guest to host.
> 
> I think this is where performance matters the most. To get a feel of the
> advantage we get with virtio-iommu, I compared the vSMMU page-table
> sharing implementation [2] and vhost-iommu + VFIO with page table
> sharing (based on Tomasz Nowicki's vhost-iommu prototype). That's on a
> ThunderX2 with a 10Gb NIC assigned to the guest kernel, which
> corresponds to case (2) above, with nesting page tables and without the
> lazy mode. The host's only job is forwarding invalidation to the HW SMMU.
> 
> vhost-iommu performed on average 1.8x and 5.5x better than vSMMU on
> netperf TCP_STREAM and TCP_MAERTS respectively (~200 samples). I think
> this can be further optimized (that was still polling under the vq
> lock), and unlike vSMMU, virtio-iommu offers the possibility of
> multi-queue for improved scalability. In addition, the guest will need
> to send both TLB and ATC invalidations with vSMMU, but virtio-iommu
> allows to multiplex those, and to invalidate ranges. Similarly for fault
> injection, having the ability to report page faults to the guest from
> the host kernel should be significantly faster than having to go to
> userspace and back to the kernel.

Fascinating. Any data about host CPU utilization?

Eric what do you think?

Is it true that SMMUv3 is fundamentally slow at the architecture level
and so a PV interface will always scale better until
a new hardware interface is designed?


> 
> (4) Virtio and vhost endpoints weren't really a priority for the base
> virtio-iommu device, we were looking mainly at device pass-through. I
> have optimizations in mind for this, although a lot of them are based on
> page tables, not MAP/UNMAP requests. But just getting the vIOMMU closer
> to vhost devices, avoiding the trip to userspace through vhost-tlb,
> should already improve things.
> 
> The important difference when DMA is done by software is that you don't
> need to mirror all mappings into the HW IOMMU - you don't need
> inval-on-map. The endpoint can ask the vIOMMU for mappings when it needs
> them, like vhost-iotlb does for example. So the MAP/UNMAP interface of
> virtio-iommu performs poorly for emulated/PV endpoints compared to an
> emulated IOMMU, since it requires three context switches for DMA
> (MAP/DMA/UNMAP) between host and guest, rather than two (DMA/INVAL).
> There is a feature I call "posted MAP", that avoids the kick on MAP and
> instead lets the device fetch the MAP request on TLB miss, but I haven't
> spent enough time experimenting with this.
> 
> > Others on the TC might feel differently.
> > 
> > If someone's looking into adding virtio iommu support in hardware,
> > that's a different matter. Which is it?
> 
> I'm not aware of anything like that, and suspect that no one would
> consider it until virtio-iommu is more widely adopted.
> 
> Thanks,
> Jean
> 
> 
> [1] Diff between current spec and page table sharing draft
>     (Very rough, missing page fault support and I'd like to rework the
>      PASID model a bit, but table descriptors p.24-26 for both Arm
>      SMMUv2 and SMMUv3.)
> 
> http://jpbrucker.net/virtio-iommu/spec-table/diffs/virtio-iommu-pdf-diff-v0.9-v0.10.dev03.pdf
> 
> [2] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration
>     https://www.mail-archive.com/qemu-devel@nongnu.org/msg562369.html
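
As a concrete illustration of the MAP/UNMAP fast path discussed above, here
is a minimal sketch of the two requests the guest sends for a single page,
built on the request structures and the synchronous send helper from the
patch below. The wrapper functions are hypothetical; interval-tree
bookkeeping, error paths and the device-written tail are omitted.

/* Hypothetical wrapper: map one read/write page into a domain */
static int sketch_map_one_page(struct viommu_dev *viommu, u32 domain_id,
			       u64 iova, u64 paddr)
{
	struct virtio_iommu_req_map map = {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(domain_id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + PAGE_SIZE - 1),
		.phys_start	= cpu_to_le64(paddr),
		.flags		= cpu_to_le32(VIRTIO_IOMMU_MAP_F_READ |
					      VIRTIO_IOMMU_MAP_F_WRITE),
	};

	/* Kick the request queue and wait for the device to fill the tail */
	return viommu_send_req_sync(viommu, &map, sizeof(map));
}

/* Hypothetical wrapper: remove the same page */
static int sketch_unmap_one_page(struct viommu_dev *viommu, u32 domain_id,
				 u64 iova)
{
	struct virtio_iommu_req_unmap unmap = {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(domain_id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + PAGE_SIZE - 1),
	};

	return viommu_send_req_sync(viommu, &unmap, sizeof(unmap));
}
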
Eric Auger Dec. 12, 2018, 3:27 p.m. UTC | #19
Hi,

On 12/12/18 3:56 PM, Michael S. Tsirkin wrote:
> On Fri, Dec 07, 2018 at 06:52:31PM +0000, Jean-Philippe Brucker wrote:
>> Sorry for the delay, I wanted to do a little more performance analysis
>> before continuing.
>>
>> On 27/11/2018 18:10, Michael S. Tsirkin wrote:
>>> On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
>>>>>> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
>>>>>> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
>>>>>
>>>>> Why bother with a feature bit for this then btw?
>>>>
>>>> We'll need a new feature bit for sharing page tables with the hardware,
>>>> because they require different requests (attach_table/invalidate instead
>>>> of map/unmap.) A future device supporting page table sharing won't
>>>> necessarily need to support map/unmap.
>>>>
>>> I don't see virtio iommu being extended to support ARM specific
>>> requests. This just won't scale, too many different
>>> descriptor formats out there.
>>
>> They aren't really ARM specific requests. The two new requests are
>> ATTACH_TABLE and INVALIDATE, which would be used by x86 IOMMUs as well.
>>
>> Sharing CPU address space with the HW IOMMU (SVM) has been in the scope
>> of virtio-iommu since the first RFC, and I've been working with that
>> extension in mind since the beginning. As an example you can have a look
>> at my current draft for this [1], which is inspired from the VFIO work
>> we've been doing with Intel.
>>
>> The negotiation phase inevitably requires vendor-specific fields in the
>> descriptors - host tells which formats are supported, guest chooses a
>> format and attaches page tables. But invalidation and fault reporting
>> descriptors are fairly generic.
> 
> We need to tread carefully here.  People expect it that if user does
> lspci and sees a virtio device then it's reasonably portable.
> 
>>> If you want to go that way down the road, you should avoid
>>> virtio iommu, instead emulate and share code with the ARM SMMU (probably
>>> with a different vendor id so you can implement the
>>> report on map for devices without PRI).
>>
>> vSMMU has to stay in userspace though. The main reason we're proposing
>> virtio-iommu is that emulating every possible vIOMMU model in the kernel
>> would be unmaintainable. With virtio-iommu we can process the fast path
>> in the host kernel, through vhost-iommu, and do the heavy lifting in
>> userspace.
> 
> Interesting.
> 
>> As said above, I'm trying to keep the fast path for
>> virtio-iommu generic.
>>
>> More notes on what I consider to be the fast path, and comparison with
>> vSMMU:
>>
>> (1) The primary use-case we have in mind for vIOMMU is something like
>> DPDK in the guest, assigning a hardware device to guest userspace. DPDK
>> maps a large amount of memory statically, to be used by a pass-through
>> device. For this case I don't think we care about vIOMMU performance.
>> Setup and teardown need to be reasonably fast, sure, but the MAP/UNMAP
>> requests don't have to be optimal.
>>
>>
>> (2) If the assigned device is owned by the guest kernel, then mappings
>> are dynamic and require dma_map/unmap() to be fast, but there generally
>> is no need for a vIOMMU, since device and drivers are trusted by the
>> guest kernel. Even when the user does enable a vIOMMU for this case
>> (allowing to over-commit guest memory, which needs to be pinned
>> otherwise),
> 
> BTW that's in theory in practice it doesn't really work.
> 
>> we generally play tricks like lazy TLBI (non-strict mode) to
>> make it faster.
> 
> Simple lazy TLB for guest/userspace drivers would be a big no no.
> You need something smarter.
> 
>> Here device and drivers are trusted, therefore the
>> vulnerability window of lazy mode isn't a concern.
>>
>> If the reason to enable the vIOMMU is over-committing guest memory
>> however, you can't use nested translation because it requires pinning
>> the second-level tables. For this case performance matters a bit,
>> because your invalidate-on-map needs to be fast, even if you enable lazy
>> mode and only receive inval-on-unmap every 10ms. It won't ever be as
>> fast as nested translation, though. For this case I think vSMMU+Caching
>> Mode and userspace virtio-iommu with MAP/UNMAP would perform similarly
>> (given page-sized payloads), because the pagetable walk doesn't add a
>> lot of overhead compared to the context switch. But given the results
>> below, vhost-iommu would be faster than vSMMU+CM.
>>
>>
>> (3) Then there is SVM. For SVM, any destructive change to the process
>> address space requires a synchronous invalidation command to the
>> hardware (at least when using PCI ATS). Given that SVM is based on page
>> faults, fault reporting from host to guest also needs to be fast, as
>> well as fault response from guest to host.
>>
>> I think this is where performance matters the most. To get a feel of the
>> advantage we get with virtio-iommu, I compared the vSMMU page-table
>> sharing implementation [2] and vhost-iommu + VFIO with page table
>> sharing (based on Tomasz Nowicki's vhost-iommu prototype). That's on a
>> ThunderX2 with a 10Gb NIC assigned to the guest kernel, which
>> corresponds to case (2) above, with nesting page tables and without the
>> lazy mode. The host's only job is forwarding invalidation to the HW SMMU.
>>
>> vhost-iommu performed on average 1.8x and 5.5x better than vSMMU on
>> netperf TCP_STREAM and TCP_MAERTS respectively (~200 samples). I think
>> this can be further optimized (that was still polling under the vq
>> lock), and unlike vSMMU, virtio-iommu offers the possibility of
>> multi-queue for improved scalability. In addition, the guest will need
>> to send both TLB and ATC invalidations with vSMMU, but virtio-iommu
>> allows to multiplex those, and to invalidate ranges. Similarly for fault
>> injection, having the ability to report page faults to the guest from
>> the host kernel should be significantly faster than having to go to
>> userspace and back to the kernel.
> 
> Fascinating. Any data about host CPU utilization?
> 
> Eric what do you think?
> 
> Is it true that SMMUv3 is fundamentally slow at the architecture level
> and so a PV interface will always scale better until
> a new hardware interface is designed?

As far as I understand the figures above correspond to vhost-iommu
against vsmmuv3. In the 2 cases the guest owns stage1 tables so the
difference comes from the IOTLB invalidation handling. With vhost we
avoid a kernel <-> userspace round trip which may mostly explain the
difference.

About SMMUv3 issues I already reported one big limitation with respect
to hugepage invalidation. See [RFC v2 4/4] iommu/arm-smmu-v3: add
CMD_TLBI_NH_VA_AM command for iova range invalidation
(https://lkml.org/lkml/2017/8/11/428).

At the smmuv3 guest driver level, arm_smmu_tlb_inv_range_nosync(), when
called with a hugepage size, invalidates each 4K/64K page of the region
rather than the whole region at once (at 4K granule, a single 2MB block
mapping turns into 512 separate invalidation commands). Each of them is
trapped by the SMMUv3 device, which forwards it to the host. This stalls
the guest. The issue can be observed in the DPDK case - not the use case
benchmarked above.

I raised this point again in recent discussions and it is unclear
whether this is an SMMUv3 driver limitation or an architecture
limitation. It seems a single invalidation within the block mapping
should invalidate the whole mapping at HW level. In the past I hacked a
workaround by defining an implementation-defined invalidation command.

Robin/Will, could you please explain the rationale behind the
arm_smmu_tlb_inv_range_nosync() implementation.

Thanks

Eric



> 
> 
>>
>> (4) Virtio and vhost endpoints weren't really a priority for the base
>> virtio-iommu device, we were looking mainly at device pass-through. I
>> have optimizations in mind for this, although a lot of them are based on
>> page tables, not MAP/UNMAP requests. But just getting the vIOMMU closer
>> to vhost devices, avoiding the trip to userspace through vhost-tlb,
>> should already improve things.
>>
>> The important difference when DMA is done by software is that you don't
>> need to mirror all mappings into the HW IOMMU - you don't need
>> inval-on-map. The endpoint can ask the vIOMMU for mappings when it needs
>> them, like vhost-iotlb does for example. So the MAP/UNMAP interface of
>> virtio-iommu performs poorly for emulated/PV endpoints compared to an
>> emulated IOMMU, since it requires three context switches for DMA
>> (MAP/DMA/UNMAP) between host and guest, rather than two (DMA/INVAL).
>> There is a feature I call "posted MAP", that avoids the kick on MAP and
>> instead lets the device fetch the MAP request on TLB miss, but I haven't
>> spent enough time experimenting with this.
>>
>>> Others on the TC might feel differently.
>>>
>>> If someone's looking into adding virtio iommu support in hardware,
>>> that's a different matter. Which is it?
>>
>> I'm not aware of anything like that, and suspect that no one would
>> consider it until virtio-iommu is more widely adopted.
>>
>> Thanks,
>> Jean
>>
>>
>> [1] Diff between current spec and page table sharing draft
>>     (Very rough, missing page fault support and I'd like to rework the
>>      PASID model a bit, but table descriptors p.24-26 for both Arm
>>      SMMUv2 and SMMUv3.)
>>
>> http://jpbrucker.net/virtio-iommu/spec-table/diffs/virtio-iommu-pdf-diff-v0.9-v0.10.dev03.pdf
>>
>> [2] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration
>>     https://www.mail-archive.com/qemu-devel@nongnu.org/msg562369.html
Robin Murphy Dec. 13, 2018, 12:37 p.m. UTC | #20
On 2018-12-12 3:27 pm, Auger Eric wrote:
> Hi,
> 
> On 12/12/18 3:56 PM, Michael S. Tsirkin wrote:
>> On Fri, Dec 07, 2018 at 06:52:31PM +0000, Jean-Philippe Brucker wrote:
>>> Sorry for the delay, I wanted to do a little more performance analysis
>>> before continuing.
>>>
>>> On 27/11/2018 18:10, Michael S. Tsirkin wrote:
>>>> On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
>>>>>>> +	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
>>>>>>> +	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
>>>>>>
>>>>>> Why bother with a feature bit for this then btw?
>>>>>
>>>>> We'll need a new feature bit for sharing page tables with the hardware,
>>>>> because they require different requests (attach_table/invalidate instead
>>>>> of map/unmap.) A future device supporting page table sharing won't
>>>>> necessarily need to support map/unmap.
>>>>>
>>>> I don't see virtio iommu being extended to support ARM specific
>>>> requests. This just won't scale, too many different
>>>> descriptor formats out there.
>>>
>>> They aren't really ARM specific requests. The two new requests are
>>> ATTACH_TABLE and INVALIDATE, which would be used by x86 IOMMUs as well.
>>>
>>> Sharing CPU address space with the HW IOMMU (SVM) has been in the scope
>>> of virtio-iommu since the first RFC, and I've been working with that
>>> extension in mind since the beginning. As an example you can have a look
>>> at my current draft for this [1], which is inspired from the VFIO work
>>> we've been doing with Intel.
>>>
>>> The negotiation phase inevitably requires vendor-specific fields in the
>>> descriptors - host tells which formats are supported, guest chooses a
>>> format and attaches page tables. But invalidation and fault reporting
>>> descriptors are fairly generic.
>>
>> We need to tread carefully here.  People expect it that if user does
>> lspci and sees a virtio device then it's reasonably portable.
>>
>>>> If you want to go that way down the road, you should avoid
>>>> virtio iommu, instead emulate and share code with the ARM SMMU (probably
>>>> with a different vendor id so you can implement the
>>>> report on map for devices without PRI).
>>>
>>> vSMMU has to stay in userspace though. The main reason we're proposing
>>> virtio-iommu is that emulating every possible vIOMMU model in the kernel
>>> would be unmaintainable. With virtio-iommu we can process the fast path
>>> in the host kernel, through vhost-iommu, and do the heavy lifting in
>>> userspace.
>>
>> Interesting.
>>
>>> As said above, I'm trying to keep the fast path for
>>> virtio-iommu generic.
>>>
>>> More notes on what I consider to be the fast path, and comparison with
>>> vSMMU:
>>>
>>> (1) The primary use-case we have in mind for vIOMMU is something like
>>> DPDK in the guest, assigning a hardware device to guest userspace. DPDK
>>> maps a large amount of memory statically, to be used by a pass-through
>>> device. For this case I don't think we care about vIOMMU performance.
>>> Setup and teardown need to be reasonably fast, sure, but the MAP/UNMAP
>>> requests don't have to be optimal.
>>>
>>>
>>> (2) If the assigned device is owned by the guest kernel, then mappings
>>> are dynamic and require dma_map/unmap() to be fast, but there generally
>>> is no need for a vIOMMU, since device and drivers are trusted by the
>>> guest kernel. Even when the user does enable a vIOMMU for this case
>>> (allowing to over-commit guest memory, which needs to be pinned
>>> otherwise),
>>
>> BTW that's in theory in practice it doesn't really work.
>>
>>> we generally play tricks like lazy TLBI (non-strict mode) to
>>> make it faster.
>>
>> Simple lazy TLB for guest/userspace drivers would be a big no no.
>> You need something smarter.
>>
>>> Here device and drivers are trusted, therefore the
>>> vulnerability window of lazy mode isn't a concern.
>>>
>>> If the reason to enable the vIOMMU is over-committing guest memory
>>> however, you can't use nested translation because it requires pinning
>>> the second-level tables. For this case performance matters a bit,
>>> because your invalidate-on-map needs to be fast, even if you enable lazy
>>> mode and only receive inval-on-unmap every 10ms. It won't ever be as
>>> fast as nested translation, though. For this case I think vSMMU+Caching
>>> Mode and userspace virtio-iommu with MAP/UNMAP would perform similarly
>>> (given page-sized payloads), because the pagetable walk doesn't add a
>>> lot of overhead compared to the context switch. But given the results
>>> below, vhost-iommu would be faster than vSMMU+CM.
>>>
>>>
>>> (3) Then there is SVM. For SVM, any destructive change to the process
>>> address space requires a synchronous invalidation command to the
>>> hardware (at least when using PCI ATS). Given that SVM is based on page
>>> faults, fault reporting from host to guest also needs to be fast, as
>>> well as fault response from guest to host.
>>>
>>> I think this is where performance matters the most. To get a feel of the
>>> advantage we get with virtio-iommu, I compared the vSMMU page-table
>>> sharing implementation [2] and vhost-iommu + VFIO with page table
>>> sharing (based on Tomasz Nowicki's vhost-iommu prototype). That's on a
>>> ThunderX2 with a 10Gb NIC assigned to the guest kernel, which
>>> corresponds to case (2) above, with nesting page tables and without the
>>> lazy mode. The host's only job is forwarding invalidation to the HW SMMU.
>>>
>>> vhost-iommu performed on average 1.8x and 5.5x better than vSMMU on
>>> netperf TCP_STREAM and TCP_MAERTS respectively (~200 samples). I think
>>> this can be further optimized (that was still polling under the vq
>>> lock), and unlike vSMMU, virtio-iommu offers the possibility of
>>> multi-queue for improved scalability. In addition, the guest will need
>>> to send both TLB and ATC invalidations with vSMMU, but virtio-iommu
>>> allows to multiplex those, and to invalidate ranges. Similarly for fault
>>> injection, having the ability to report page faults to the guest from
>>> the host kernel should be significantly faster than having to go to
>>> userspace and back to the kernel.
>>
>> Fascinating. Any data about host CPU utilization?
>>
>> Eric what do you think?
>>
>> Is it true that SMMUv3 is fundamentally slow at the architecture level
>> and so a PV interface will always scale better until
>> a new hardware interface is designed?
> 
> As far as I understand the figures above correspond to vhost-iommu
> against vsmmuv3. In the 2 cases the guest owns stage1 tables so the
> difference comes from the IOTLB invalidation handling. With vhost we
> avoid a kernel <-> userspace round trip which may mostly explain the
> difference.
> 
> About SMMUv3 issues I already reported one big limitation with respect
> to hugepage invalidation. See [RFC v2 4/4] iommu/arm-smmu-v3: add
> CMD_TLBI_NH_VA_AM command for iova range invalidation
> (https://lkml.org/lkml/2017/8/11/428).
> 
> At smmuv3 guest driver level, arm_smmu_tlb_inv_range_nosync(), when
> called with a hugepage size, invalidates each 4K/64K page of the region
> and not the whole region at once. Each of them are trapped by the SMMUv3
> device which forwards them to the host. This stalls the guest. This
> issue can be observed in DPDK case - not the use case benchmarked above - .
> 
> I raised this point again in recent discussions and it is unclear
> whether this is an SMMUv3 driver limitation or an architecture
> limitation. Seems a single invalidation within the block mapping should
> invalidate the whole mapping at HW level. In the past I hacked a
> workaround by defining an implementation defined invalidation command.
> 
> Robin/Will, could you please explain the rationale behind the
> arm_smmu_tlb_inv_range_nosync() implementation.

Fundamentally, TLBI commands only take an address, so invalidations have 
to match the actual leaf PTEs being removed. If iommu_unmap() sees that 
2MB is a valid block size, it may send a single "unmap this 2MB" request 
to the driver, but nobody knows how that region is actually mapped until 
the pagetable code walks the tables. If it does find a 2MB block PTE, 
then it can simply clear it and generate a single invalidation command - 
if a TLB entry exists for that block mapping then any address within the 
block will match it. If however that 2MB region was actually covered by 
a subtable of 4KB pages, then separate TLB entries may exist for any or 
all of those pages, and a single address can at best only match one of 
them. Thus after the table PTE is cleared, a separate command has to be 
generated for each page to ensure that all possible TLB entries 
associated with that table are invalidated.

So if you're seeing page-granularity invalidation, it means that the 
thing you're unmapping wasn't actually mapped as that size of hugepage 
in the first place (or something pathological has caused it to be split 
in the meantime - I recall we once had an amusing bug which caused VFIO 
to do that on teardown). There is one suboptimal case if we're taking 
out potentially multiple levels of table at once (e.g. a 1GB region), 
where we don't bother recursing down the removed table to see whether 
anything was mapped with blocks at the intermediate level, and just 
invalidate the whole lot at page granularity to cover the worst-case 
scenario. I think we assumed that sort of unmap would be unlikely enough 
that it wasn't worth optimising for at the time, but there's certainly 
scope to improve it if unmapping 1GB worth of 2MB blocks turns out to be 
a common thing.
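
A minimal sketch of that walk-time decision, using hypothetical helpers
rather than the actual pagetable code:

/* Hypothetical stand-in for queueing one CMD_TLBI_NH_VA for this address */
static void sketch_emit_tlbi_va(u64 iova)
{
}

/*
 * One unmapped region, [iova, iova + size): a block leaf needs a single
 * command, a table of page leaves needs one command per page.
 */
static void sketch_tlb_inv_range(u64 iova, size_t size, size_t granule,
				 bool leaf_was_block)
{
	if (leaf_was_block) {
		/* Any address within the block matches its single TLB entry */
		sketch_emit_tlbi_va(iova);
		return;
	}

	/* Separate TLB entries may exist for each page: invalidate them all */
	while (size) {
		sketch_emit_tlbi_va(iova);
		iova += granule;
		size -= granule;
	}
}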

FWIW SMMUv3.2 has introduced actual range-invalidation commands - 
reworking all the TLBI logic in the driver to make worthwhile use of 
those is on the to-do list, but until real 3.2 hardware starts coming to 
light I've not been prioritising it.

Robin.

> 
> Thanks
> 
> Eric
> 
> 
> 
>>
>>
>>>
>>> (4) Virtio and vhost endpoints weren't really a priority for the base
>>> virtio-iommu device, we were looking mainly at device pass-through. I
>>> have optimizations in mind for this, although a lot of them are based on
>>> page tables, not MAP/UNMAP requests. But just getting the vIOMMU closer
>>> to vhost devices, avoiding the trip to userspace through vhost-tlb,
>>> should already improve things.
>>>
>>> The important difference when DMA is done by software is that you don't
>>> need to mirror all mappings into the HW IOMMU - you don't need
>>> inval-on-map. The endpoint can ask the vIOMMU for mappings when it needs
>>> them, like vhost-iotlb does for example. So the MAP/UNMAP interface of
>>> virtio-iommu performs poorly for emulated/PV endpoints compared to an
>>> emulated IOMMU, since it requires three context switches for DMA
>>> (MAP/DMA/UNMAP) between host and guest, rather than two (DMA/INVAL).
>>> There is a feature I call "posted MAP", that avoids the kick on MAP and
>>> instead lets the device fetch the MAP request on TLB miss, but I haven't
>>> spent enough time experimenting with this.
>>>
>>>> Others on the TC might feel differently.
>>>>
>>>> If someone's looking into adding virtio iommu support in hardware,
>>>> that's a different matter. Which is it?
>>>
>>> I'm not aware of anything like that, and suspect that no one would
>>> consider it until virtio-iommu is more widely adopted.
>>>
>>> Thanks,
>>> Jean
>>>
>>>
>>> [1] Diff between current spec and page table sharing draft
>>>      (Very rough, missing page fault support and I'd like to rework the
>>>       PASID model a bit, but table descriptors p.24-26 for both Arm
>>>       SMMUv2 and SMMUv3.)
>>>
>>> http://jpbrucker.net/virtio-iommu/spec-table/diffs/virtio-iommu-pdf-diff-v0.9-v0.10.dev03.pdf
>>>
>>> [2] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration
>>>      https://www.mail-archive.com/qemu-devel@nongnu.org/msg562369.html
Eric Auger Dec. 13, 2018, 2:13 p.m. UTC | #21
Hi Robin

On 12/13/18 1:37 PM, Robin Murphy wrote:
> On 2018-12-12 3:27 pm, Auger Eric wrote:
>> Hi,
>>
>> On 12/12/18 3:56 PM, Michael S. Tsirkin wrote:
>>> On Fri, Dec 07, 2018 at 06:52:31PM +0000, Jean-Philippe Brucker wrote:
>>>> Sorry for the delay, I wanted to do a little more performance analysis
>>>> before continuing.
>>>>
>>>> On 27/11/2018 18:10, Michael S. Tsirkin wrote:
>>>>> On Tue, Nov 27, 2018 at 05:55:20PM +0000, Jean-Philippe Brucker wrote:
>>>>>>>> +    if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
>>>>>>>> +        !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
>>>>>>>
>>>>>>> Why bother with a feature bit for this then btw?
>>>>>>
>>>>>> We'll need a new feature bit for sharing page tables with the
>>>>>> hardware,
>>>>>> because they require different requests (attach_table/invalidate
>>>>>> instead
>>>>>> of map/unmap.) A future device supporting page table sharing won't
>>>>>> necessarily need to support map/unmap.
>>>>>>
>>>>> I don't see virtio iommu being extended to support ARM specific
>>>>> requests. This just won't scale, too many different
>>>>> descriptor formats out there.
>>>>
>>>> They aren't really ARM specific requests. The two new requests are
>>>> ATTACH_TABLE and INVALIDATE, which would be used by x86 IOMMUs as well.
>>>>
>>>> Sharing CPU address space with the HW IOMMU (SVM) has been in the scope
>>>> of virtio-iommu since the first RFC, and I've been working with that
>>>> extension in mind since the beginning. As an example you can have a
>>>> look
>>>> at my current draft for this [1], which is inspired from the VFIO work
>>>> we've been doing with Intel.
>>>>
>>>> The negotiation phase inevitably requires vendor-specific fields in the
>>>> descriptors - host tells which formats are supported, guest chooses a
>>>> format and attaches page tables. But invalidation and fault reporting
>>>> descriptors are fairly generic.
>>>
>>> We need to tread carefully here.  People expect it that if user does
>>> lspci and sees a virtio device then it's reasonably portable.
>>>
>>>>> If you want to go that way down the road, you should avoid
>>>>> virtio iommu, instead emulate and share code with the ARM SMMU
>>>>> (probably
>>>>> with a different vendor id so you can implement the
>>>>> report on map for devices without PRI).
>>>>
>>>> vSMMU has to stay in userspace though. The main reason we're proposing
>>>> virtio-iommu is that emulating every possible vIOMMU model in the
>>>> kernel
>>>> would be unmaintainable. With virtio-iommu we can process the fast path
>>>> in the host kernel, through vhost-iommu, and do the heavy lifting in
>>>> userspace.
>>>
>>> Interesting.
>>>
>>>> As said above, I'm trying to keep the fast path for
>>>> virtio-iommu generic.
>>>>
>>>> More notes on what I consider to be the fast path, and comparison with
>>>> vSMMU:
>>>>
>>>> (1) The primary use-case we have in mind for vIOMMU is something like
>>>> DPDK in the guest, assigning a hardware device to guest userspace. DPDK
>>>> maps a large amount of memory statically, to be used by a pass-through
>>>> device. For this case I don't think we care about vIOMMU performance.
>>>> Setup and teardown need to be reasonably fast, sure, but the MAP/UNMAP
>>>> requests don't have to be optimal.
>>>>
>>>>
>>>> (2) If the assigned device is owned by the guest kernel, then mappings
>>>> are dynamic and require dma_map/unmap() to be fast, but there generally
>>>> is no need for a vIOMMU, since device and drivers are trusted by the
>>>> guest kernel. Even when the user does enable a vIOMMU for this case
>>>> (allowing to over-commit guest memory, which needs to be pinned
>>>> otherwise),
>>>
>>> BTW that's in theory in practice it doesn't really work.
>>>
>>>> we generally play tricks like lazy TLBI (non-strict mode) to
>>>> make it faster.
>>>
>>> Simple lazy TLB for guest/userspace drivers would be a big no no.
>>> You need something smarter.
>>>
>>>> Here device and drivers are trusted, therefore the
>>>> vulnerability window of lazy mode isn't a concern.
>>>>
>>>> If the reason to enable the vIOMMU is over-committing guest memory
>>>> however, you can't use nested translation because it requires pinning
>>>> the second-level tables. For this case performance matters a bit,
>>>> because your invalidate-on-map needs to be fast, even if you enable
>>>> lazy
>>>> mode and only receive inval-on-unmap every 10ms. It won't ever be as
>>>> fast as nested translation, though. For this case I think vSMMU+Caching
>>>> Mode and userspace virtio-iommu with MAP/UNMAP would perform similarly
>>>> (given page-sized payloads), because the pagetable walk doesn't add a
>>>> lot of overhead compared to the context switch. But given the results
>>>> below, vhost-iommu would be faster than vSMMU+CM.
>>>>
>>>>
>>>> (3) Then there is SVM. For SVM, any destructive change to the process
>>>> address space requires a synchronous invalidation command to the
>>>> hardware (at least when using PCI ATS). Given that SVM is based on page
>>>> faults, fault reporting from host to guest also needs to be fast, as
>>>> well as fault response from guest to host.
>>>>
>>>> I think this is where performance matters the most. To get a feel of
>>>> the
>>>> advantage we get with virtio-iommu, I compared the vSMMU page-table
>>>> sharing implementation [2] and vhost-iommu + VFIO with page table
>>>> sharing (based on Tomasz Nowicki's vhost-iommu prototype). That's on a
>>>> ThunderX2 with a 10Gb NIC assigned to the guest kernel, which
>>>> corresponds to case (2) above, with nesting page tables and without the
>>>> lazy mode. The host's only job is forwarding invalidation to the HW
>>>> SMMU.
>>>>
>>>> vhost-iommu performed on average 1.8x and 5.5x better than vSMMU on
>>>> netperf TCP_STREAM and TCP_MAERTS respectively (~200 samples). I think
>>>> this can be further optimized (that was still polling under the vq
>>>> lock), and unlike vSMMU, virtio-iommu offers the possibility of
>>>> multi-queue for improved scalability. In addition, the guest will need
>>>> to send both TLB and ATC invalidations with vSMMU, but virtio-iommu
>>>> allows to multiplex those, and to invalidate ranges. Similarly for
>>>> fault
>>>> injection, having the ability to report page faults to the guest from
>>>> the host kernel should be significantly faster than having to go to
>>>> userspace and back to the kernel.
>>>
>>> Fascinating. Any data about host CPU utilization?
>>>
>>> Eric what do you think?
>>>
>>> Is it true that SMMUv3 is fundamentally slow at the architecture level
>>> and so a PV interface will always scale better until
>>> a new hardware interface is designed?
>>
>> As far as I understand the figures above correspond to vhost-iommu
>> against vsmmuv3. In the 2 cases the guest owns stage1 tables so the
>> difference comes from the IOTLB invalidation handling. With vhost we
>> avoid a kernel <-> userspace round trip which may mostly explain the
>> difference.
>>
>> About SMMUv3 issues I already reported one big limitation with respect
>> to hugepage invalidation. See [RFC v2 4/4] iommu/arm-smmu-v3: add
>> CMD_TLBI_NH_VA_AM command for iova range invalidation
>> (https://lkml.org/lkml/2017/8/11/428).
>>
>> At smmuv3 guest driver level, arm_smmu_tlb_inv_range_nosync(), when
>> called with a hugepage size, invalidates each 4K/64K page of the region
>> and not the whole region at once. Each of them are trapped by the SMMUv3
>> device which forwards them to the host. This stalls the guest. This
>> issue can be observed in DPDK case - not the use case benchmarked
>> above - .
>>
>> I raised this point again in recent discussions and it is unclear
>> whether this is an SMMUv3 driver limitation or an architecture
>> limitation. Seems a single invalidation within the block mapping should
>> invalidate the whole mapping at HW level. In the past I hacked a
>> workaround by defining an implementation defined invalidation command.
>>
>> Robin/Will, could you please explain the rationale behind the
>> arm_smmu_tlb_inv_range_nosync() implementation.
> 
> Fundamentally, TLBI commands only take an address, so invalidations have
> to match the actual leaf PTEs being removed. If iommu_unmap() sees that
> 2MB is a valid block size, it may send a single "unmap this 2MB" request
> to the driver, but nobody knows how that region is actually mapped until
> the pagetable code walks the tables. If it does find a 2MB block PTE,
> then it can simply clear it and generate a single invalidation command -
> if a TLB entry exists for that block mapping then any address within the
> block will match it. If however that 2MB region was actually covered by
> a subtable of 4KB pages, then separate TLB entries may exist for any or
> all of those pages, and a single address can at best only match one of
> them. Thus after the table PTE is cleared, a separate command has to be
> generated for each page to ensure that all possible TLB entries
> associated with that table are invalidated.
> 
> So if you're seeing page-granularity invalidation, it means that the
> thing you're unmapping wasn't actually mapped as that size of hugepage
> in the first place (or something pathological has caused it to be split
> in the meantime - I recall we once had an amusing bug which caused VFIO
> to do that on teardown). There is one suboptimal case if we're taking
> out potentially multiple levels of table at once (e.g. a 1GB region),
> where we don't bother recursing down the removed table to see whether
> anything was mapped with blocks at the intermediate level, and just
> invalidate the whole lot at page granularity to cover the worst-case
> scenario. I think we assumed that sort of unmap would be unlikely enough
> that it wasn't worth optimising for at the time, but there's certainly
> scope to improve it if unmapping 1GB worth of 2MB blocks turns out to be
> a common thing.

Thank you for your reply. This last situation looks like the one I
encountered. I will test with DPDK again and let you know.

Thanks

Eric
> 
> FWIW SMMUv3.2 has introduced actual range-invalidation commands -
> reworking all the TLBI logic in the driver to make worthwhile use of
> those is on the to-do list, but until real 3.2 hardware starts coming to
> light I've not been prioritising it.
> 
> Robin.
> 
>>
>> Thanks
>>
>> Eric
>>
>>
>>
>>>
>>>
>>>>
>>>> (4) Virtio and vhost endpoints weren't really a priority for the base
>>>> virtio-iommu device, we were looking mainly at device pass-through. I
>>>> have optimizations in mind for this, although a lot of them are
>>>> based on
>>>> page tables, not MAP/UNMAP requests. But just getting the vIOMMU closer
>>>> to vhost devices, avoiding the trip to userspace through vhost-tlb,
>>>> should already improve things.
>>>>
>>>> The important difference when DMA is done by software is that you don't
>>>> need to mirror all mappings into the HW IOMMU - you don't need
>>>> inval-on-map. The endpoint can ask the vIOMMU for mappings when it
>>>> needs
>>>> them, like vhost-iotlb does for example. So the MAP/UNMAP interface of
>>>> virtio-iommu performs poorly for emulated/PV endpoints compared to an
>>>> emulated IOMMU, since it requires three context switches for DMA
>>>> (MAP/DMA/UNMAP) between host and guest, rather than two (DMA/INVAL).
>>>> There is a feature I call "posted MAP", that avoids the kick on MAP and
>>>> instead lets the device fetch the MAP request on TLB miss, but I
>>>> haven't
>>>> spent enough time experimenting with this.
>>>>
>>>>> Others on the TC might feel differently.
>>>>>
>>>>> If someone's looking into adding virtio iommu support in hardware,
>>>>> that's a different matter. Which is it?
>>>>
>>>> I'm not aware of anything like that, and suspect that no one would
>>>> consider it until virtio-iommu is more widely adopted.
>>>>
>>>> Thanks,
>>>> Jean
>>>>
>>>>
>>>> [1] Diff between current spec and page table sharing draft
>>>>      (Very rough, missing page fault support and I'd like to rework the
>>>>       PASID model a bit, but table descriptors p.24-26 for both Arm
>>>>       SMMUv2 and SMMUv3.)
>>>>
>>>> http://jpbrucker.net/virtio-iommu/spec-table/diffs/virtio-iommu-pdf-diff-v0.9-v0.10.dev03.pdf
>>>>
>>>>
>>>> [2] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration
>>>>      https://www.mail-archive.com/qemu-devel@nongnu.org/msg562369.html
diff mbox series

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 1689dcfec800..3d8550c76f4a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15946,6 +15946,13 @@  S:	Maintained
 F:	drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h
 
+VIRTIO IOMMU DRIVER
+M:	Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/iommu/virtio-iommu.c
+F:	include/uapi/linux/virtio_iommu.h
+
 VIRTUAL BOX GUEST DEVICE DRIVER
 M:	Hans de Goede <hdegoede@redhat.com>
 M:	Arnd Bergmann <arnd@arndb.de>
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index bf2bbfa2a399..db5f2b8c23f5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -464,4 +464,15 @@  config QCOM_IOMMU
 	help
 	  Support for IOMMU on certain Qualcomm SoCs.
 
+config VIRTIO_IOMMU
+	bool "Virtio IOMMU driver"
+	depends on VIRTIO=y
+	select IOMMU_API
+	select INTERVAL_TREE
+	select ARM_DMA_USE_IOMMU if ARM
+	help
+	  Para-virtualised IOMMU driver with virtio.
+
+	  Say Y here if you intend to run this kernel as a guest.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5481e5fe1f95..bd7e55751d09 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -36,3 +36,4 @@  obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
 obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
new file mode 100644
index 000000000000..7540dab9c8dc
--- /dev/null
+++ b/drivers/iommu/virtio-iommu.c
@@ -0,0 +1,916 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtio driver for the paravirtualized IOMMU
+ *
+ * Copyright (C) 2018 Arm Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/amba/bus.h>
+#include <linux/delay.h>
+#include <linux/dma-iommu.h>
+#include <linux/freezer.h>
+#include <linux/interval_tree.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_iommu.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/wait.h>
+
+#include <uapi/linux/virtio_iommu.h>
+
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
+#define VIOMMU_REQUEST_VQ		0
+#define VIOMMU_NR_VQS			1
+
+struct viommu_dev {
+	struct iommu_device		iommu;
+	struct device			*dev;
+	struct virtio_device		*vdev;
+
+	struct ida			domain_ids;
+
+	struct virtqueue		*vqs[VIOMMU_NR_VQS];
+	spinlock_t			request_lock;
+	struct list_head		requests;
+
+	/* Device configuration */
+	struct iommu_domain_geometry	geometry;
+	u64				pgsize_bitmap;
+	u8				domain_bits;
+};
+
+struct viommu_mapping {
+	phys_addr_t			paddr;
+	struct interval_tree_node	iova;
+	u32				flags;
+};
+
+struct viommu_domain {
+	struct iommu_domain		domain;
+	struct viommu_dev		*viommu;
+	struct mutex			mutex; /* protects viommu pointer */
+	unsigned int			id;
+
+	spinlock_t			mappings_lock;
+	struct rb_root_cached		mappings;
+
+	unsigned long			nr_endpoints;
+};
+
+struct viommu_endpoint {
+	struct viommu_dev		*viommu;
+	struct viommu_domain		*vdomain;
+};
+
+struct viommu_request {
+	struct list_head		list;
+	void				*writeback;
+	unsigned int			write_offset;
+	unsigned int			len;
+	char				buf[];
+};
+
+#define to_viommu_domain(domain)	\
+	container_of(domain, struct viommu_domain, domain)
+
+static int viommu_get_req_errno(void *buf, size_t len)
+{
+	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
+
+	switch (tail->status) {
+	case VIRTIO_IOMMU_S_OK:
+		return 0;
+	case VIRTIO_IOMMU_S_UNSUPP:
+		return -ENOSYS;
+	case VIRTIO_IOMMU_S_INVAL:
+		return -EINVAL;
+	case VIRTIO_IOMMU_S_RANGE:
+		return -ERANGE;
+	case VIRTIO_IOMMU_S_NOENT:
+		return -ENOENT;
+	case VIRTIO_IOMMU_S_FAULT:
+		return -EFAULT;
+	case VIRTIO_IOMMU_S_IOERR:
+	case VIRTIO_IOMMU_S_DEVERR:
+	default:
+		return -EIO;
+	}
+}
+
+static void viommu_set_req_status(void *buf, size_t len, int status)
+{
+	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
+
+	tail->status = status;
+}
+
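+/*
+ * viommu_get_write_desc_offset - Offset of the device-writable part
+ *
+ * All requests currently end with a fixed-size tail that the device fills in,
+ * so the device-writable portion of the buffer starts at len - sizeof(tail).
+ */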
+static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
+					  struct virtio_iommu_req_head *req,
+					  size_t len)
+{
+	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
+
+	return len - tail_size;
+}
+
+/*
+ * __viommu_sync_req - Complete all in-flight requests
+ *
+ * Wait for all added requests to complete. When this function returns, all
+ * requests that were in-flight at the time of the call have completed.
+ */
+static int __viommu_sync_req(struct viommu_dev *viommu)
+{
+	int ret = 0;
+	unsigned int len;
+	size_t write_len;
+	struct viommu_request *req;
+	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
+
+	assert_spin_locked(&viommu->request_lock);
+
+	virtqueue_kick(vq);
+
+	while (!list_empty(&viommu->requests)) {
+		len = 0;
+		req = virtqueue_get_buf(vq, &len);
+		if (!req)
+			continue;
+
+		if (!len)
+			viommu_set_req_status(req->buf, req->len,
+					      VIRTIO_IOMMU_S_IOERR);
+
+		write_len = req->len - req->write_offset;
+		if (req->writeback && len == write_len)
+			memcpy(req->writeback, req->buf + req->write_offset,
+			       write_len);
+
+		list_del(&req->list);
+		kfree(req);
+	}
+
+	return ret;
+}
+
+static int viommu_sync_req(struct viommu_dev *viommu)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&viommu->request_lock, flags);
+	ret = __viommu_sync_req(viommu);
+	if (ret)
+		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
+	spin_unlock_irqrestore(&viommu->request_lock, flags);
+
+	return ret;
+}
+
+/*
+ * __viommu_add_request - Add one request to the queue
+ * @buf: pointer to the request buffer
+ * @len: length of the request buffer
+ * @writeback: copy data back to the buffer when the request completes.
+ *
+ * Add a request to the queue. Only synchronize the queue if it's already full.
+ * Otherwise don't kick the queue nor wait for requests to complete.
+ *
+ * When @writeback is true, data written by the device, including the request
+ * status, is copied into @buf after the request completes. This is unsafe if
+ * the caller allocates @buf on stack and drops the lock between add_req() and
+ * sync_req().
+ *
+ * Return 0 if the request was successfully added to the queue.
+ */
+static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
+			    bool writeback)
+{
+	int ret;
+	off_t write_offset;
+	struct viommu_request *req;
+	struct scatterlist top_sg, bottom_sg;
+	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
+	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
+
+	assert_spin_locked(&viommu->request_lock);
+
+	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
+	if (write_offset <= 0)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	req->len = len;
+	if (writeback) {
+		req->writeback = buf + write_offset;
+		req->write_offset = write_offset;
+	}
+	memcpy(&req->buf, buf, write_offset);
+
+	sg_init_one(&top_sg, req->buf, write_offset);
+	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);
+
+	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
+	if (ret == -ENOSPC) {
+		/* If the queue is full, sync and retry */
+		if (!__viommu_sync_req(viommu))
+			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
+	}
+	if (ret)
+		goto err_free;
+
+	list_add_tail(&req->list, &viommu->requests);
+	return 0;
+
+err_free:
+	kfree(req);
+	return ret;
+}
+
+static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&viommu->request_lock, flags);
+	ret = __viommu_add_req(viommu, buf, len, false);
+	if (ret)
+		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
+	spin_unlock_irqrestore(&viommu->request_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Send a request and wait for it to complete. Return the request status (as an
+ * errno)
+ */
+static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
+				size_t len)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&viommu->request_lock, flags);
+
+	ret = __viommu_add_req(viommu, buf, len, true);
+	if (ret) {
+		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
+		goto out_unlock;
+	}
+
+	ret = __viommu_sync_req(viommu);
+	if (ret) {
+		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
+		/* Fall-through (get the actual request status) */
+	}
+
+	ret = viommu_get_req_errno(buf, len);
+out_unlock:
+	spin_unlock_irqrestore(&viommu->request_lock, flags);
+	return ret;
+}
+
+/*
+ * viommu_add_mapping - add a mapping to the internal tree
+ *
+ * On success, return 0. Otherwise return -ENOMEM.
+ */
+static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
+			      phys_addr_t paddr, size_t size, u32 flags)
+{
+	unsigned long irqflags;
+	struct viommu_mapping *mapping;
+
+	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
+	if (!mapping)
+		return -ENOMEM;
+
+	mapping->paddr		= paddr;
+	mapping->iova.start	= iova;
+	mapping->iova.last	= iova + size - 1;
+	mapping->flags		= flags;
+
+	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
+	interval_tree_insert(&mapping->iova, &vdomain->mappings);
+	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * viommu_del_mappings - remove mappings from the internal tree
+ *
+ * @vdomain: the domain
+ * @iova: start of the range
+ * @size: size of the range. A size of 0 corresponds to the entire address
+ *	space.
+ *
+ * On success, returns the number of unmapped bytes (>= size)
+ */
+static size_t viommu_del_mappings(struct viommu_domain *vdomain,
+				  unsigned long iova, size_t size)
+{
+	size_t unmapped = 0;
+	unsigned long flags;
+	unsigned long last = iova + size - 1;
+	struct viommu_mapping *mapping = NULL;
+	struct interval_tree_node *node, *next;
+
+	spin_lock_irqsave(&vdomain->mappings_lock, flags);
+	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+	while (next) {
+		node = next;
+		mapping = container_of(node, struct viommu_mapping, iova);
+		next = interval_tree_iter_next(node, iova, last);
+
+		/* Trying to split a mapping? */
+		if (mapping->iova.start < iova)
+			break;
+
+		/*
+		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
+		 * with a single MAP request, so remove the full mapping.
+		 */
+		unmapped += mapping->iova.last - mapping->iova.start + 1;
+
+		interval_tree_remove(node, &vdomain->mappings);
+		kfree(mapping);
+	}
+	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
+
+	return unmapped;
+}
+
+/*
+ * viommu_replay_mappings - re-send MAP requests
+ *
+ * When reattaching a domain that was previously detached from all endpoints,
+ * mappings were deleted from the device. Re-create the mappings available in
+ * the internal tree.
+ */
+static int viommu_replay_mappings(struct viommu_domain *vdomain)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct viommu_mapping *mapping;
+	struct interval_tree_node *node;
+	struct virtio_iommu_req_map map;
+
+	spin_lock_irqsave(&vdomain->mappings_lock, flags);
+	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
+	while (node) {
+		mapping = container_of(node, struct viommu_mapping, iova);
+		map = (struct virtio_iommu_req_map) {
+			.head.type	= VIRTIO_IOMMU_T_MAP,
+			.domain		= cpu_to_le32(vdomain->id),
+			.virt_start	= cpu_to_le64(mapping->iova.start),
+			.virt_end	= cpu_to_le64(mapping->iova.last),
+			.phys_start	= cpu_to_le64(mapping->paddr),
+			.flags		= cpu_to_le32(mapping->flags),
+		};
+
+		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+		if (ret)
+			break;
+
+		node = interval_tree_iter_next(node, 0, -1UL);
+	}
+	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
+
+	return ret;
+}
+
+/* IOMMU API */
+
+static struct iommu_domain *viommu_domain_alloc(unsigned type)
+{
+	struct viommu_domain *vdomain;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+		return NULL;
+
+	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+	if (!vdomain)
+		return NULL;
+
+	mutex_init(&vdomain->mutex);
+	spin_lock_init(&vdomain->mappings_lock);
+	vdomain->mappings = RB_ROOT_CACHED;
+
+	if (type == IOMMU_DOMAIN_DMA &&
+	    iommu_get_dma_cookie(&vdomain->domain)) {
+		kfree(vdomain);
+		return NULL;
+	}
+
+	return &vdomain->domain;
+}
+
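+/*
+ * viommu_domain_finalise - Bind a domain to its viommu and allocate an ID
+ *
+ * Deferred until the first attach, once the viommu that owns the endpoint
+ * (and therefore the page size mask, input range and domain ID space) is
+ * known.
+ */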
+static int viommu_domain_finalise(struct viommu_dev *viommu,
+				  struct iommu_domain *domain)
+{
+	int ret;
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
+				  (1U << viommu->domain_bits) - 1;
+
+	vdomain->viommu		= viommu;
+
+	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
+	domain->geometry	= viommu->geometry;
+
+	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+	if (ret >= 0)
+		vdomain->id = (unsigned int)ret;
+
+	return ret > 0 ? 0 : ret;
+}
+
+static void viommu_domain_free(struct iommu_domain *domain)
+{
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	iommu_put_dma_cookie(domain);
+
+	/* Free all remaining mappings (size 2^64) */
+	viommu_del_mappings(vdomain, 0, 0);
+
+	if (vdomain->viommu)
+		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
+
+	kfree(vdomain);
+}
+
+static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	int i;
+	int ret = 0;
+	struct virtio_iommu_req_attach req;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct viommu_endpoint *vdev = fwspec->iommu_priv;
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	mutex_lock(&vdomain->mutex);
+	if (!vdomain->viommu) {
+		/*
+		 * Properly initialize the domain now that we know which viommu
+		 * owns it.
+		 */
+		ret = viommu_domain_finalise(vdev->viommu, domain);
+	} else if (vdomain->viommu != vdev->viommu) {
+		dev_err(dev, "cannot attach to foreign vIOMMU\n");
+		ret = -EXDEV;
+	}
+	mutex_unlock(&vdomain->mutex);
+
+	if (ret)
+		return ret;
+
+	/*
+	 * In the virtio-iommu device, when attaching the endpoint to a new
+	 * domain, it is detached from the old one and, if as a result the
+	 * old domain isn't attached to any endpoint, all mappings are removed
+	 * from the old domain and it is freed.
+	 *
+	 * In the driver the old domain still exists, and its mappings will be
+	 * recreated if it gets reattached to an endpoint. Otherwise it will be
+	 * freed explicitly.
+	 *
+	 * vdev->vdomain is protected by group->mutex
+	 */
+	if (vdev->vdomain)
+		vdev->vdomain->nr_endpoints--;
+
+	req = (struct virtio_iommu_req_attach) {
+		.head.type	= VIRTIO_IOMMU_T_ATTACH,
+		.domain		= cpu_to_le32(vdomain->id),
+	};
+
+	for (i = 0; i < fwspec->num_ids; i++) {
+		req.endpoint = cpu_to_le32(fwspec->ids[i]);
+
+		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
+		if (ret)
+			return ret;
+	}
+
+	if (!vdomain->nr_endpoints) {
+		/*
+		 * This endpoint is the first to be attached to the domain.
+		 * Replay existing mappings (e.g. SW MSI).
+		 */
+		ret = viommu_replay_mappings(vdomain);
+		if (ret)
+			return ret;
+	}
+
+	vdomain->nr_endpoints++;
+	vdev->vdomain = vdomain;
+
+	return 0;
+}
+
+static int viommu_map(struct iommu_domain *domain, unsigned long iova,
+		      phys_addr_t paddr, size_t size, int prot)
+{
+	int ret;
+	int flags;
+	struct virtio_iommu_req_map map;
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
+		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
+		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
+
+	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+	if (ret)
+		return ret;
+
+	map = (struct virtio_iommu_req_map) {
+		.head.type	= VIRTIO_IOMMU_T_MAP,
+		.domain		= cpu_to_le32(vdomain->id),
+		.virt_start	= cpu_to_le64(iova),
+		.phys_start	= cpu_to_le64(paddr),
+		.virt_end	= cpu_to_le64(iova + size - 1),
+		.flags		= cpu_to_le32(flags),
+	};
+
+	if (!vdomain->nr_endpoints)
+		return 0;
+
+	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+	if (ret)
+		viommu_del_mappings(vdomain, iova, size);
+
+	return ret;
+}
+
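+/*
+ * viommu_unmap - Remove mappings from the internal tree and queue an UNMAP
+ * request. The request is only sent to the device when the core calls
+ * iotlb_sync(), which allows several unmaps to be batched.
+ */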
+static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			   size_t size)
+{
+	int ret = 0;
+	size_t unmapped;
+	struct virtio_iommu_req_unmap unmap;
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	unmapped = viommu_del_mappings(vdomain, iova, size);
+	if (unmapped < size)
+		return 0;
+
+	/* Device already removed all mappings after detach. */
+	if (!vdomain->nr_endpoints)
+		return unmapped;
+
+	unmap = (struct virtio_iommu_req_unmap) {
+		.head.type	= VIRTIO_IOMMU_T_UNMAP,
+		.domain		= cpu_to_le32(vdomain->id),
+		.virt_start	= cpu_to_le64(iova),
+		.virt_end	= cpu_to_le64(iova + unmapped - 1),
+	};
+
+	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
+	return ret ? 0 : unmapped;
+}
+
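+/*
+ * viommu_iova_to_phys - Translate an IOVA by walking the internal mapping
+ * tree, without issuing a request to the device.
+ */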
+static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
+				       dma_addr_t iova)
+{
+	u64 paddr = 0;
+	unsigned long flags;
+	struct viommu_mapping *mapping;
+	struct interval_tree_node *node;
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	spin_lock_irqsave(&vdomain->mappings_lock, flags);
+	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
+	if (node) {
+		mapping = container_of(node, struct viommu_mapping, iova);
+		paddr = mapping->paddr + (iova - mapping->iova.start);
+	}
+	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
+
+	return paddr;
+}
+
+static void viommu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	viommu_sync_req(vdomain->viommu);
+}
+
+static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
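+	/*
+	 * Reserve an IOVA window for software-mapped MSI doorbells, so that
+	 * the DMA layer has somewhere to map them (IOMMU_RESV_SW_MSI).
+	 */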
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
+					 IOMMU_RESV_SW_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+	iommu_dma_get_resv_regions(dev, head);
+}
+
+static void viommu_put_resv_regions(struct device *dev, struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
+static struct iommu_ops viommu_ops;
+static struct virtio_driver virtio_iommu_drv;
+
+static int viommu_match_node(struct device *dev, void *data)
+{
+	return dev->parent->fwnode == data;
+}
+
+static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
+{
+	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
+						fwnode, viommu_match_node);
+	put_device(dev);
+
+	return dev ? dev_to_virtio(dev)->priv : NULL;
+}
+
+static int viommu_add_device(struct device *dev)
+{
+	int ret;
+	struct iommu_group *group;
+	struct viommu_endpoint *vdev;
+	struct viommu_dev *viommu = NULL;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (!fwspec || fwspec->ops != &viommu_ops)
+		return -ENODEV;
+
+	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
+	if (!viommu)
+		return -ENODEV;
+
+	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+	if (!vdev)
+		return -ENOMEM;
+
+	vdev->viommu = viommu;
+	fwspec->iommu_priv = vdev;
+
+	ret = iommu_device_link(&viommu->iommu, dev);
+	if (ret)
+		goto err_free_dev;
+
+	/*
+	 * This must be the last step: iommu_group_get_for_dev() creates the
+	 * default domain and attaches the endpoint to it, so everything else
+	 * has to be initialized by now.
+	 */
+	group = iommu_group_get_for_dev(dev);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto err_unlink_dev;
+	}
+
+	iommu_group_put(group);
+
+	return 0;
+
+err_unlink_dev:
+	iommu_device_unlink(&viommu->iommu, dev);
+err_free_dev:
+	kfree(vdev);
+
+	return ret;
+}
+
+static void viommu_remove_device(struct device *dev)
+{
+	struct viommu_endpoint *vdev;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (!fwspec || fwspec->ops != &viommu_ops)
+		return;
+
+	vdev = fwspec->iommu_priv;
+
+	iommu_group_remove_device(dev);
+	iommu_device_unlink(&vdev->viommu->iommu, dev);
+	kfree(vdev);
+}
+
+static struct iommu_group *viommu_device_group(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return pci_device_group(dev);
+	else
+		return generic_device_group(dev);
+}
+
+static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+	return iommu_fwspec_add_ids(dev, args->args, 1);
+}
+
+static struct iommu_ops viommu_ops = {
+	.domain_alloc		= viommu_domain_alloc,
+	.domain_free		= viommu_domain_free,
+	.attach_dev		= viommu_attach_dev,
+	.map			= viommu_map,
+	.unmap			= viommu_unmap,
+	.iova_to_phys		= viommu_iova_to_phys,
+	.iotlb_sync		= viommu_iotlb_sync,
+	.add_device		= viommu_add_device,
+	.remove_device		= viommu_remove_device,
+	.device_group		= viommu_device_group,
+	.get_resv_regions	= viommu_get_resv_regions,
+	.put_resv_regions	= viommu_put_resv_regions,
+	.of_xlate		= viommu_of_xlate,
+};
+
+static int viommu_init_vqs(struct viommu_dev *viommu)
+{
+	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
+	const char *name = "request";
+	struct virtqueue *vq;
+
+	vq = virtio_find_single_vq(vdev, NULL, name);
+	if (IS_ERR(vq)) {
+		dev_err(viommu->dev, "cannot find VQ\n");
+		return PTR_ERR(vq);
+	}
+
+	viommu->vqs[VIOMMU_REQUEST_VQ] = vq;
+
+	return 0;
+}
+
+static int viommu_probe(struct virtio_device *vdev)
+{
+	struct device *parent_dev = vdev->dev.parent;
+	struct viommu_dev *viommu = NULL;
+	struct device *dev = &vdev->dev;
+	u64 input_start = 0;
+	u64 input_end = -1UL;
+	int ret;
+
+	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
+		return -ENODEV;
+
+	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
+	if (!viommu)
+		return -ENOMEM;
+
+	spin_lock_init(&viommu->request_lock);
+	ida_init(&viommu->domain_ids);
+	viommu->dev = dev;
+	viommu->vdev = vdev;
+	INIT_LIST_HEAD(&viommu->requests);
+
+	ret = viommu_init_vqs(viommu);
+	if (ret)
+		return ret;
+
+	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
+		     &viommu->pgsize_bitmap);
+
+	if (!viommu->pgsize_bitmap) {
+		ret = -EINVAL;
+		goto err_free_vqs;
+	}
+
+	viommu->domain_bits = 32;
+
+	/* Optional features */
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+			     struct virtio_iommu_config, input_range.start,
+			     &input_start);
+
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+			     struct virtio_iommu_config, input_range.end,
+			     &input_end);
+
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
+			     struct virtio_iommu_config, domain_bits,
+			     &viommu->domain_bits);
+
+	viommu->geometry = (struct iommu_domain_geometry) {
+		.aperture_start	= input_start,
+		.aperture_end	= input_end,
+		.force_aperture	= true,
+	};
+
+	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
+
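+	/*
+	 * Make the device ready now: registering with the IOMMU core below
+	 * may trigger attach requests before probe() returns.
+	 */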
+	virtio_device_ready(vdev);
+
+	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
+				     virtio_bus_name(vdev));
+	if (ret)
+		goto err_free_vqs;
+
+	iommu_device_set_ops(&viommu->iommu, &viommu_ops);
+	iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
+
+	iommu_device_register(&viommu->iommu);
+
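+	/*
+	 * Become the IOMMU for the PCI, AMBA and platform buses, so that
+	 * endpoints on these buses are routed to viommu_add_device().
+	 */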
+#ifdef CONFIG_PCI
+	if (pci_bus_type.iommu_ops != &viommu_ops) {
+		pci_request_acs();
+		ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
+		if (ret)
+			goto err_unregister;
+	}
+#endif
+#ifdef CONFIG_ARM_AMBA
+	if (amba_bustype.iommu_ops != &viommu_ops) {
+		ret = bus_set_iommu(&amba_bustype, &viommu_ops);
+		if (ret)
+			goto err_unregister;
+	}
+#endif
+	if (platform_bus_type.iommu_ops != &viommu_ops) {
+		ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
+		if (ret)
+			goto err_unregister;
+	}
+
+	vdev->priv = viommu;
+
+	dev_info(dev, "input address: %u bits\n",
+		 order_base_2(viommu->geometry.aperture_end));
+	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
+
+	return 0;
+
+err_unregister:
+	iommu_device_sysfs_remove(&viommu->iommu);
+	iommu_device_unregister(&viommu->iommu);
+err_free_vqs:
+	vdev->config->del_vqs(vdev);
+
+	return ret;
+}
+
+static void viommu_remove(struct virtio_device *vdev)
+{
+	struct viommu_dev *viommu = vdev->priv;
+
+	iommu_device_sysfs_remove(&viommu->iommu);
+	iommu_device_unregister(&viommu->iommu);
+
+	/* Stop all virtqueues */
+	vdev->config->reset(vdev);
+	vdev->config->del_vqs(vdev);
+
+	dev_info(&vdev->dev, "device removed\n");
+}
+
+static void viommu_config_changed(struct virtio_device *vdev)
+{
+	dev_warn(&vdev->dev, "config changed\n");
+}
+
+static unsigned int features[] = {
+	VIRTIO_IOMMU_F_MAP_UNMAP,
+	VIRTIO_IOMMU_F_DOMAIN_BITS,
+	VIRTIO_IOMMU_F_INPUT_RANGE,
+};
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_iommu_drv = {
+	.driver.name		= KBUILD_MODNAME,
+	.driver.owner		= THIS_MODULE,
+	.id_table		= id_table,
+	.feature_table		= features,
+	.feature_table_size	= ARRAY_SIZE(features),
+	.probe			= viommu_probe,
+	.remove			= viommu_remove,
+	.config_changed		= viommu_config_changed,
+};
+
+module_virtio_driver(virtio_iommu_drv);
+
+MODULE_DESCRIPTION("Virtio IOMMU driver");
+MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 6d5c3b2d4f4d..cfe47c5d9a56 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -43,5 +43,6 @@ 
 #define VIRTIO_ID_INPUT        18 /* virtio input */
 #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
 #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
+#define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
new file mode 100644
index 000000000000..e7c05e3afa44
--- /dev/null
+++ b/include/uapi/linux/virtio_iommu.h
@@ -0,0 +1,104 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio-iommu definition v0.9
+ *
+ * Copyright (C) 2018 Arm Ltd.
+ */
+#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
+#define _UAPI_LINUX_VIRTIO_IOMMU_H
+
+#include <linux/types.h>
+
+/* Feature bits */
+#define VIRTIO_IOMMU_F_INPUT_RANGE		0
+#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
+#define VIRTIO_IOMMU_F_MAP_UNMAP		2
+#define VIRTIO_IOMMU_F_BYPASS			3
+
+struct virtio_iommu_range {
+	__u64					start;
+	__u64					end;
+};
+
+struct virtio_iommu_config {
+	/* Supported page sizes */
+	__u64					page_size_mask;
+	/* Supported IOVA range */
+	struct virtio_iommu_range		input_range;
+	/* Max domain ID size */
+	__u8					domain_bits;
+	__u8					padding[3];
+};
+
+/* Request types */
+#define VIRTIO_IOMMU_T_ATTACH			0x01
+#define VIRTIO_IOMMU_T_DETACH			0x02
+#define VIRTIO_IOMMU_T_MAP			0x03
+#define VIRTIO_IOMMU_T_UNMAP			0x04
+
+/* Status types */
+#define VIRTIO_IOMMU_S_OK			0x00
+#define VIRTIO_IOMMU_S_IOERR			0x01
+#define VIRTIO_IOMMU_S_UNSUPP			0x02
+#define VIRTIO_IOMMU_S_DEVERR			0x03
+#define VIRTIO_IOMMU_S_INVAL			0x04
+#define VIRTIO_IOMMU_S_RANGE			0x05
+#define VIRTIO_IOMMU_S_NOENT			0x06
+#define VIRTIO_IOMMU_S_FAULT			0x07
+
+struct virtio_iommu_req_head {
+	__u8					type;
+	__u8					reserved[3];
+};
+
+struct virtio_iommu_req_tail {
+	__u8					status;
+	__u8					reserved[3];
+};
+
+struct virtio_iommu_req_attach {
+	struct virtio_iommu_req_head		head;
+	__le32					domain;
+	__le32					endpoint;
+	__u8					reserved[8];
+	struct virtio_iommu_req_tail		tail;
+};
+
+struct virtio_iommu_req_detach {
+	struct virtio_iommu_req_head		head;
+	__le32					domain;
+	__le32					endpoint;
+	__u8					reserved[8];
+	struct virtio_iommu_req_tail		tail;
+};
+
+#define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
+#define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
+#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
+#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
+
+#define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
+						 VIRTIO_IOMMU_MAP_F_WRITE |	\
+						 VIRTIO_IOMMU_MAP_F_EXEC |	\
+						 VIRTIO_IOMMU_MAP_F_MMIO)
+
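+/* The range of a map/unmap request is inclusive: [virt_start, virt_end] */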
+struct virtio_iommu_req_map {
+	struct virtio_iommu_req_head		head;
+	__le32					domain;
+	__le64					virt_start;
+	__le64					virt_end;
+	__le64					phys_start;
+	__le32					flags;
+	struct virtio_iommu_req_tail		tail;
+};
+
+struct virtio_iommu_req_unmap {
+	struct virtio_iommu_req_head		head;
+	__le32					domain;
+	__le64					virt_start;
+	__le64					virt_end;
+	__u8					reserved[4];
+	struct virtio_iommu_req_tail		tail;
+};
+
+#endif