diff mbox series

[RFC,1/7] vhost/iommufd: Add the functions support iommufd

Message ID 20230923170540.1447301-2-lulu@redhat.com (mailing list archive)
State RFC
Headers show
Series vdpa: Add support for iommufd | expand

Checks

Context Check Description
netdev/tree_selection success Not a local patch

Commit Message

Cindy Lu Sept. 23, 2023, 5:05 p.m. UTC
Add a new file, vhost/iommufd.c, to support iommufd. This file
contains the iommufd functions for both emulated devices and
physical devices.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 drivers/vhost/iommufd.c | 151 ++++++++++++++++++++++++++++++++++++++++
 drivers/vhost/vhost.h   |  21 ++++++
 2 files changed, 172 insertions(+)
 create mode 100644 drivers/vhost/iommufd.c

Comments

Jason Wang Sept. 26, 2023, 2:47 a.m. UTC | #1
On Sun, Sep 24, 2023 at 1:05 AM Cindy Lu <lulu@redhat.com> wrote:
>
> Add a new file vhost/iommufd.c to support the function of
> iommufd, This file contains iommufd function of emulated device and
> the physical device.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
> ---
>  drivers/vhost/iommufd.c | 151 ++++++++++++++++++++++++++++++++++++++++
>  drivers/vhost/vhost.h   |  21 ++++++
>  2 files changed, 172 insertions(+)
>  create mode 100644 drivers/vhost/iommufd.c
>
> diff --git a/drivers/vhost/iommufd.c b/drivers/vhost/iommufd.c
> new file mode 100644
> index 000000000000..080858f76fd5
> --- /dev/null
> +++ b/drivers/vhost/iommufd.c
> @@ -0,0 +1,151 @@
> +#include <linux/vdpa.h>
> +#include <linux/iommufd.h>
> +
> +#include "vhost.h"
> +
> +MODULE_IMPORT_NS(IOMMUFD);
> +
> +int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
> +                     u32 *ioas_id, u32 *device_id)
> +{
> +       int ret;
> +
> +       vhost_vdpa_lockdep_assert_held(vdpa);
> +
> +       /*
> +        * If the driver doesn't provide this op then it means the device does
> +        * not do DMA at all. So nothing to do.
> +        */
> +       if (!vdpa->config->bind_iommufd)
> +               return 0;
> +
> +       ret = vdpa->config->bind_iommufd(vdpa, ictx, device_id);
> +       if (ret)
> +               return ret;
> +
> +       ret = vdpa->config->attach_ioas(vdpa, ioas_id);
> +       if (ret)
> +               goto err_unbind;
> +       vdpa->iommufd_attached = true;
> +
> +       return 0;
> +
> +err_unbind:
> +       if (vdpa->config->unbind_iommufd)
> +               vdpa->config->unbind_iommufd(vdpa);
> +       return ret;
> +}
> +
> +void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
> +{
> +       vhost_vdpa_lockdep_assert_held(vdpa);
> +
> +       if (vdpa->config->unbind_iommufd)
> +               vdpa->config->unbind_iommufd(vdpa);
> +}
> +
> +int vdpa_iommufd_physical_bind(struct vdpa_device *vdpa,
> +                              struct iommufd_ctx *ictx, u32 *out_device_id)
> +{
> +       struct device *dma_dev = vdpa_get_dma_dev(vdpa);
> +       struct iommufd_device *idev;
> +
> +       idev = iommufd_device_bind(ictx, dma_dev, out_device_id);
> +       if (IS_ERR(idev))
> +               return PTR_ERR(idev);
> +       vdpa->iommufd_device = idev;
> +       return 0;
> +}
> +EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_bind);
> +
> +void vdpa_iommufd_physical_unbind(struct vdpa_device *vdpa)
> +{
> +       vhost_vdpa_lockdep_assert_held(vdpa);
> +
> +       if (vdpa->iommufd_attached) {
> +               iommufd_device_detach(vdpa->iommufd_device);
> +               vdpa->iommufd_attached = false;
> +       }
> +       iommufd_device_unbind(vdpa->iommufd_device);
> +       vdpa->iommufd_device = NULL;
> +}
> +EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_unbind);
> +
> +int vdpa_iommufd_physical_attach_ioas(struct vdpa_device *vdpa, u32 *pt_id)
> +{
> +       unsigned int flags = 0;
> +
> +       return iommufd_device_attach(vdpa->iommufd_device, pt_id);
> +}
> +EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_attach_ioas);
> +
> +static void vdpa_emulated_unmap(void *data, unsigned long iova,
> +                               unsigned long length)
> +{
> +       struct vdpa_device *vdpa = data;
> +
> +       vdpa->config->dma_unmap(vdpa, 0, iova, length);
> +}
> +
> +static const struct iommufd_access_ops vdpa_user_ops = {
> +       .needs_pin_pages = 1,

Note that simulators support VA, so there is no page pinning in that case; please handle this while rebasing.

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

So we need to handle that case as well.

(Note that it looks like VA mode is broken, I may need some time to fix that).

> +       .unmap = vdpa_emulated_unmap,
> +};
> +
> +int vdpa_iommufd_emulated_bind(struct vdpa_device *vdpa,
> +                              struct iommufd_ctx *ictx, u32 *out_device_id)
> +{
> +       vhost_vdpa_lockdep_assert_held(vdpa);
> +
> +       vdpa->iommufd_ictx = ictx;
> +       iommufd_ctx_get(ictx);
> +       struct iommufd_device *idev;
> +
> +       idev = iommufd_device_bind(ictx, vdpa->dma_dev, out_device_id);

This seems inappropriate for emulated devices, as it deals with
concepts that only exist for physical devices, such as the IOMMU
domain.

If possible, please refer to how VFIO handles this (I guess it should
have something similar).

Thanks
diff mbox series

Patch

diff --git a/drivers/vhost/iommufd.c b/drivers/vhost/iommufd.c
new file mode 100644
index 000000000000..080858f76fd5
--- /dev/null
+++ b/drivers/vhost/iommufd.c
@@ -0,0 +1,151 @@ 
+#include <linux/vdpa.h>
+#include <linux/iommufd.h>
+
+#include "vhost.h"
+
+MODULE_IMPORT_NS(IOMMUFD);
+
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+		      u32 *ioas_id, u32 *device_id)
+{
+	int ret;
+
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	/*
+        * If the driver doesn't provide this op then it means the device does
+        * not do DMA at all. So nothing to do.
+        */
+	if (!vdpa->config->bind_iommufd)
+		return 0;
+
+	ret = vdpa->config->bind_iommufd(vdpa, ictx, device_id);
+	if (ret)
+		return ret;
+
+	ret = vdpa->config->attach_ioas(vdpa, ioas_id);
+	if (ret)
+		goto err_unbind;
+	vdpa->iommufd_attached = true;
+
+	return 0;
+
+err_unbind:
+	if (vdpa->config->unbind_iommufd)
+		vdpa->config->unbind_iommufd(vdpa);
+	return ret;
+}
+
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (vdpa->config->unbind_iommufd)
+		vdpa->config->unbind_iommufd(vdpa);
+}
+
+int vdpa_iommufd_physical_bind(struct vdpa_device *vdpa,
+			       struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+	struct iommufd_device *idev;
+
+	idev = iommufd_device_bind(ictx, dma_dev, out_device_id);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
+	vdpa->iommufd_device = idev;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_bind);
+
+void vdpa_iommufd_physical_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (vdpa->iommufd_attached) {
+		iommufd_device_detach(vdpa->iommufd_device);
+		vdpa->iommufd_attached = false;
+	}
+	iommufd_device_unbind(vdpa->iommufd_device);
+	vdpa->iommufd_device = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_unbind);
+
+int vdpa_iommufd_physical_attach_ioas(struct vdpa_device *vdpa, u32 *pt_id)
+{
+	unsigned int flags = 0;
+
+	return iommufd_device_attach(vdpa->iommufd_device, pt_id);
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_physical_attach_ioas);
+
+static void vdpa_emulated_unmap(void *data, unsigned long iova,
+				unsigned long length)
+{
+	struct vdpa_device *vdpa = data;
+
+	vdpa->config->dma_unmap(vdpa, 0, iova, length);
+}
+
+static const struct iommufd_access_ops vdpa_user_ops = {
+	.needs_pin_pages = 1,
+	.unmap = vdpa_emulated_unmap,
+};
+
+int vdpa_iommufd_emulated_bind(struct vdpa_device *vdpa,
+			       struct iommufd_ctx *ictx, u32 *out_device_id)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	vdpa->iommufd_ictx = ictx;
+	iommufd_ctx_get(ictx);
+	struct iommufd_device *idev;
+
+	idev = iommufd_device_bind(ictx, vdpa->dma_dev, out_device_id);
+
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
+	vdpa->iommufd_device = idev;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_bind);
+
+void vdpa_iommufd_emulated_unbind(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (vdpa->iommufd_access) {
+		iommufd_access_destroy(vdpa->iommufd_access);
+		vdpa->iommufd_access = NULL;
+	}
+	iommufd_ctx_put(vdpa->iommufd_ictx);
+	vdpa->iommufd_ictx = NULL;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_unbind);
+
+int vdpa_iommufd_emulated_attach_ioas(struct vdpa_device *vdpa, u32 *pt_id)
+{
+	struct iommufd_access *user;
+
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	user = iommufd_access_create(vdpa->iommufd_ictx, *pt_id, &vdpa_user_ops,
+				     vdpa);
+	if (IS_ERR(user))
+		return PTR_ERR(user);
+	vdpa->iommufd_access = user;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_attach_ioas);
+int vdpa_iommufd_emulated_detach_ioas(struct vdpa_device *vdpa)
+{
+	vhost_vdpa_lockdep_assert_held(vdpa);
+
+	if (!vdpa->iommufd_ictx || !vdpa->iommufd_access)
+		return -1;
+
+	iommufd_access_destroy(vdpa->iommufd_access);
+	vdpa->iommufd_access = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vdpa_iommufd_emulated_detach_ioas);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 790b296271f1..c470a5596d9c 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -291,6 +291,27 @@  static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
 }
 #endif
 
+struct iommufd_ctx;
+struct vdpa_device;
+void vhost_vdpa_lockdep_assert_held(struct vdpa_device *vdpa);
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+int vdpa_iommufd_bind(struct vdpa_device *vdpa, struct iommufd_ctx *ictx,
+		      u32 *ioas_id, u32 *device_id);
+void vdpa_iommufd_unbind(struct vdpa_device *vdpa);
+#else
+static inline int vdpa_iommufd_bind(struct vdpa_device *vdpa,
+				    struct iommufd_ctx *ictx, u32 *ioas_id,
+				    u32 *device_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void vdpa_iommufd_unbind(struct vdpa_device *vdpa)
+{
+}
+#endif
+
 /* Memory accessors */
 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
 {