@@ -389,6 +389,14 @@ static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
 	return ops->resume;
 }
 
+static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
+{
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+
+	return ops->get_vq_desc_group;
+}
+
 static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 {
 	struct vdpa_device *vdpa = v->vdpa;
@@ -690,6 +698,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 		if (copy_from_user(&features, featurep, sizeof(features)))
 			return -EFAULT;
 		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
 				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
 				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
 				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
@@ -700,6 +709,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
 		    !vhost_vdpa_can_resume(v))
 			return -EOPNOTSUPP;
+		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
+			return -EINVAL;
+		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+		    !vhost_vdpa_has_desc_group(v))
+			return -EOPNOTSUPP;
 		vhost_set_backend_features(&v->vdev, features);
 		return 0;
 	}
@@ -753,6 +768,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
 		if (vhost_vdpa_can_resume(v))
 			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+		if (vhost_vdpa_has_desc_group(v))
+			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
 		features |= vhost_vdpa_get_backend_features(v);
 		if (copy_to_user(featurep, &features, sizeof(features)))
 			r = -EFAULT;
@@ -185,5 +185,10 @@ struct vhost_vdpa_iova_range {
  * DRIVER_OK
  */
 #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK  0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID  0x7
 
 #endif
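
For context, here is a minimal userspace sketch of how a VMM could negotiate the new flag through the existing VHOST_GET_BACKEND_FEATURES / VHOST_SET_BACKEND_FEATURES ioctls. It is illustrative only and not part of the patch: the device path and the negotiate_desc_asid() helper are made up, and error handling is trimmed. As enforced above, VHOST_BACKEND_F_DESC_ASID is only accepted together with VHOST_BACKEND_F_IOTLB_ASID, which is what lets the descriptor/driver/device areas be bound to a different ASID than the data buffers.

	/* Illustrative sketch, not part of this patch. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>
	#include <linux/vhost_types.h>

	#ifndef VHOST_BACKEND_F_DESC_ASID
	#define VHOST_BACKEND_F_DESC_ASID 0x7	/* matches the uapi addition above */
	#endif

	int negotiate_desc_asid(const char *dev_path)
	{
		uint64_t features;
		int fd = open(dev_path, O_RDWR);

		if (fd < 0)
			return -1;

		/* Backend features the kernel advertises for this device. */
		if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
			goto err;

		/* DESC_ASID is only meaningful with IOTLB_ASID; the kernel
		 * rejects the former without the latter with -EINVAL. */
		if (!(features & (1ULL << VHOST_BACKEND_F_IOTLB_ASID)) ||
		    !(features & (1ULL << VHOST_BACKEND_F_DESC_ASID)))
			goto err;

		/* Ack the bits we want; keep IOTLB_MSG_V2 since the v2 IOTLB
		 * messages carry the ASID field. */
		features = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
			   (1ULL << VHOST_BACKEND_F_IOTLB_ASID) |
			   (1ULL << VHOST_BACKEND_F_DESC_ASID);
		if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features) < 0)
			goto err;

		return fd;
	err:
		close(fd);
		return -1;
	}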