@@ -255,6 +255,17 @@ static void vfio_dma_populate_bitmap(struct vfio_iommu *iommu,
 	vfio_dma_populate_bitmap_full(dma, pgsize);
 }
 
+static void vfio_iommu_populate_bitmap(struct vfio_iommu *iommu)
+{
+	struct rb_node *n;
+	struct vfio_dma *dma;
+
+	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+		dma = rb_entry(n, struct vfio_dma, node);
+		vfio_dma_populate_bitmap(iommu, dma);
+	}
+}
+
 static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu)
 {
 	struct rb_node *n;
@@ -2190,7 +2201,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
	 * An iommu backed group can dirty memory directly and therefore
	 * demotes the iommu scope until it declares itself dirty tracking
	 * capable via the page pinning interface.
	 */
-	iommu->pinned_page_dirty_scope = false;
+	if (iommu->pinned_page_dirty_scope) {
+		iommu->pinned_page_dirty_scope = false;
+		if (iommu->dirty_page_tracking)
+			vfio_iommu_populate_bitmap(iommu);
+	}
+
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
Attaching an iommu backend group potentially gives it access to all dma
ranges, so we should traverse all dma ranges and mark them dirty.

Fixes: d6a4c185660c ("vfio iommu: Implementation of ioctl for dirty pages tracking")
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 drivers/vfio/vfio_iommu_type1.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
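
For reference, the effect of the change can be sketched outside the kernel.
The following minimal userspace C sketch is not the driver code: struct
dma_range, range_populate_full() and populate_all() are hypothetical
stand-ins for struct vfio_dma, vfio_dma_populate_bitmap_full() and the new
vfio_iommu_populate_bitmap(), and a linked list stands in for the
iommu->dma_list rb-tree. It only illustrates why, once an iommu backed
group attaches, every page of every tracked mapping must be reported dirty
at least once.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_ULONG	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical stand-in for struct vfio_dma: one dirty bit per
 * pgsize-sized page; ->next stands in for the rb-tree linkage. */
struct dma_range {
	uint64_t iova;
	uint64_t size;
	unsigned long *bitmap;
	struct dma_range *next;
};

/* Stand-in for vfio_dma_populate_bitmap_full(): set the dirty bit of
 * every page of one mapping. */
static void range_populate_full(struct dma_range *dma, uint64_t pgsize)
{
	uint64_t npages = dma->size / pgsize;

	for (uint64_t i = 0; i < npages; i++)
		dma->bitmap[i / BITS_PER_ULONG] |= 1UL << (i % BITS_PER_ULONG);
}

/* Stand-in for vfio_iommu_populate_bitmap(): an iommu backed group may
 * have written any mapping, so walk them all and mark them dirty. */
static void populate_all(struct dma_range *head, uint64_t pgsize)
{
	for (struct dma_range *dma = head; dma; dma = dma->next)
		range_populate_full(dma, pgsize);
}

int main(void)
{
	uint64_t pgsize = 4096;
	struct dma_range b = { 0x400000,  8 * pgsize, calloc(1, sizeof(unsigned long)), NULL };
	struct dma_range a = { 0x100000, 16 * pgsize, calloc(1, sizeof(unsigned long)), &b };

	populate_all(&a, pgsize);
	printf("range a bitmap: 0x%lx\n", a.bitmap[0]);	/* 0xffff: 16 pages dirty */
	printf("range b bitmap: 0x%lx\n", b.bitmap[0]);	/* 0xff: 8 pages dirty */
	free(a.bitmap);
	free(b.bitmap);
	return 0;
}

The sketch drops the locking and rb-tree details; the driver itself walks
the real rb-tree under iommu->lock and, if I read the existing helpers
correctly, fills dma->bitmap via bitmap_set() rather than open-coded bit
arithmetic.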