@@ -79,6 +79,7 @@ struct vfio_iommu {
bool dirty_page_tracking;
bool pinned_page_dirty_scope;
bool container_open;
+ uint64_t num_non_hwdbm_groups;
};
struct vfio_domain {
@@ -116,6 +117,7 @@ struct vfio_group {
struct list_head next;
bool mdev_group; /* An mdev group */
bool pinned_page_dirty_scope;
+ bool iommu_hwdbm; /* For iommu-backed group */
};
struct vfio_iova {
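
The two added fields form a bookkeeping pair: group->iommu_hwdbm caches whether every device in an iommu-backed group supports hardware dirty bit management (HWDBM), and iommu->num_non_hwdbm_groups counts the attached groups for which that check failed. A minimal sketch of the intended invariant, with a hypothetical helper name that is not part of the patch (like num_non_pinned_groups, the counter is only touched under iommu->lock):

/*
 * Hypothetical helper, not in this patch: the container can rely on
 * hardware dirty bits only while no attached iommu-backed group lacks
 * HWDBM.  Callers would hold iommu->lock, as the attach/detach paths
 * below do when updating the counter.
 */
static inline bool vfio_iommu_hwdbm_capable(struct vfio_iommu *iommu)
{
	return iommu->num_non_hwdbm_groups == 0;
}
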
@@ -1187,6 +1189,24 @@ static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
}
}
+static int vfio_dev_enable_feature(struct device *dev, void *data)
+{
+ enum iommu_dev_features *feat = data;
+
+ if (iommu_dev_feature_enabled(dev, *feat))
+ return 0;
+
+ return iommu_dev_enable_feature(dev, *feat);
+}
+
+static bool vfio_group_supports_hwdbm(struct vfio_group *group)
+{
+ enum iommu_dev_features feat = IOMMU_DEV_FEAT_HWDBM;
+
+ return !iommu_group_for_each_dev(group->iommu_group, &feat,
+ vfio_dev_enable_feature);
+}
+
static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
struct vfio_dma *dma, dma_addr_t base_iova,
size_t pgsize)
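
iommu_group_for_each_dev() stops at and returns the first non-zero value produced by the callback, so vfio_group_supports_hwdbm() reports true only if IOMMU_DEV_FEAT_HWDBM is already enabled, or can be enabled, on every device in the group. For illustration only, a symmetric teardown helper could follow the same per-device pattern via the IOMMU core's iommu_dev_disable_feature(); this sketch is not part of the patch, which never disables the feature:

/*
 * Hypothetical counterpart, shown only to illustrate the same
 * per-device iteration used by vfio_dev_enable_feature().
 */
static int vfio_dev_disable_feature(struct device *dev, void *data)
{
	enum iommu_dev_features *feat = data;

	if (!iommu_dev_feature_enabled(dev, *feat))
		return 0;

	return iommu_dev_disable_feature(dev, *feat);
}
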
@@ -2435,6 +2455,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
* capable via the page pinning interface.
*/
iommu->num_non_pinned_groups++;
+
+ /* Update the hwdbm status of group and iommu */
+ group->iommu_hwdbm = vfio_group_supports_hwdbm(group);
+ if (!group->iommu_hwdbm)
+ iommu->num_non_hwdbm_groups++;
+
mutex_unlock(&iommu->lock);
vfio_iommu_resv_free(&group_resv_regions);
@@ -2571,6 +2597,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_domain *domain;
struct vfio_group *group;
bool update_dirty_scope = false;
+ bool update_iommu_hwdbm = false;
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
@@ -2609,6 +2636,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
vfio_iommu_detach_group(domain, group);
update_dirty_scope = !group->pinned_page_dirty_scope;
+ update_iommu_hwdbm = !group->iommu_hwdbm;
list_del(&group->next);
kfree(group);
/*
@@ -2651,6 +2679,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->dirty_page_tracking)
vfio_iommu_populate_bitmap_full(iommu);
}
+ if (update_iommu_hwdbm)
+ iommu->num_non_hwdbm_groups--;
mutex_unlock(&iommu->lock);
}
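
The detach side mirrors the attach path and the existing num_non_pinned_groups handling: update_iommu_hwdbm records whether the departing group had been counted as non-HWDBM, and the counter is decremented once the group is freed. A hedged sketch of how a later dirty-logging change might consume the counter; the function name is hypothetical, while vfio_iommu_populate_bitmap_full() is the existing conservative fallback used in the detach path above:

/*
 * Hypothetical consumer, not part of this patch: if any attached group
 * lacks HWDBM, hardware dirty bits cannot be trusted for the whole
 * container, so report every page as dirty instead.
 */
static void vfio_iommu_update_dirty_log(struct vfio_iommu *iommu)
{
	lockdep_assert_held(&iommu->lock);

	if (iommu->num_non_hwdbm_groups && iommu->dirty_page_tracking)
		vfio_iommu_populate_bitmap_full(iommu);
}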