@@ -74,6 +74,7 @@ struct vfio_iommu {
 	bool			nesting;
 	bool			dirty_page_tracking;
 	bool			pinned_page_dirty_scope;
+	uint64_t		num_non_hwdbm_groups;	/* Groups whose devices lack HWDBM */
 };
 
 struct vfio_domain {
@@ -102,6 +103,7 @@ struct vfio_group {
 	struct list_head	next;
 	bool			mdev_group;	/* An mdev group */
 	bool			pinned_page_dirty_scope;
+	bool			iommu_hwdbm;	/* Valid for non-mdev group */
 };
 
 struct vfio_iova {
@@ -976,6 +978,36 @@ static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
 	}
 }
 
+/*
+ * iommu_group_for_each_dev() callback: @data points to the
+ * iommu_dev_features value to test. Returns -ENODEV as soon as one
+ * device lacks the feature, which stops the group iteration early.
+ */
+static int vfio_dev_has_feature(struct device *dev, void *data)
+{
+	enum iommu_dev_features *feat = data;
+
+	if (!iommu_dev_has_feature(dev, *feat))
+		return -ENODEV;
+
+	return 0;
+}
+
+/*
+ * Return true only if every device in @group supports
+ * IOMMU_DEV_FEAT_HWDBM (hardware dirty bit management); a single
+ * unsupported device makes the whole group unsupported.
+ */
+static bool vfio_group_supports_hwdbm(struct vfio_group *group)
+{
+	enum iommu_dev_features feat = IOMMU_DEV_FEAT_HWDBM;
+
+	if (iommu_group_for_each_dev(group->iommu_group, &feat,
+				     vfio_dev_has_feature))
+		return false;
+
+	return true;
+}
+
 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 			      struct vfio_dma *dma, dma_addr_t base_iova,
 			      size_t pgsize)
@@ -2189,6 +2212,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * capable via the page pinning interface.
 	 */
 	iommu->pinned_page_dirty_scope = false;
+
+	/* Cache whether all devices in this group support HWDBM */
+	group->iommu_hwdbm = vfio_group_supports_hwdbm(group);
+	if (!group->iommu_hwdbm)
+		iommu->num_non_hwdbm_groups++;
+
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2342,6 +2371,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	struct vfio_domain *domain;
 	struct vfio_group *group;
 	bool update_dirty_scope = false;
+	bool update_iommu_hwdbm = false;	/* true if detached group lacked HWDBM */
 	LIST_HEAD(iova_copy);
 
 	mutex_lock(&iommu->lock);
@@ -2380,6 +2410,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 
 		vfio_iommu_detach_group(domain, group);
 		update_dirty_scope = !group->pinned_page_dirty_scope;
+		update_iommu_hwdbm = !group->iommu_hwdbm;
 		list_del(&group->next);
 		kfree(group);
 		/*
@@ -2417,6 +2448,9 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 */
 	if (update_dirty_scope)
 		update_pinned_page_dirty_scope(iommu);
+	/* One less group without HWDBM is attached to the iommu */
+	if (update_iommu_hwdbm)
+		iommu->num_non_hwdbm_groups--;
 	mutex_unlock(&iommu->lock);
 }
 