
[v1,2/8] vfio/type1: Use iommu_group_replace_domain()

Message ID 20220106022053.2406748-3-baolu.lu@linux.intel.com (mailing list archive)
State New, archived
Series Scrap iommu_attach/detach_group() interfaces

Commit Message

Baolu Lu Jan. 6, 2022, 2:20 a.m. UTC
After an IOMMU group is placed in a vfio container, the domain attachment
may be deferred. During this window, another kernel module can attach a
different domain to the group simply by doing:

	group = iommu_group_get(dev);
	iommu_attach_group(domain, group);

Replacing iommu_attach/detach_group() with iommu_group_replace_domain(),
and prohibiting the use of iommu_attach/detach_group() in other kernel
drivers, solves this problem.
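
Judging from the call sites converted below, the helper takes the group,
the currently attached (old) domain and the new domain, with NULL standing
for "none" on either side. A minimal sketch of that assumed usage (the
prototype itself is presumably introduced earlier in this series):

	/* Assumed semantics, inferred from the conversions in this patch: */
	int iommu_group_replace_domain(struct iommu_group *group,
				       struct iommu_domain *old_domain,
				       struct iommu_domain *new_domain);

	/* initial attach: no previous domain */
	iommu_group_replace_domain(group, NULL, new_domain);
	/* switch from one domain to another */
	iommu_group_replace_domain(group, old_domain, new_domain);
	/* detach: no new domain */
	iommu_group_replace_domain(group, old_domain, NULL);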

Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/vfio/vfio_iommu_type1.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

Patch

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f17490ab238f..25276a5db737 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2213,7 +2213,8 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 			goto out_domain;
 	}
 
-	ret = iommu_attach_group(domain->domain, group->iommu_group);
+	ret = iommu_group_replace_domain(group->iommu_group, NULL,
+					 domain->domain);
 	if (ret)
 		goto out_domain;
 
@@ -2280,19 +2281,14 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 	list_for_each_entry(d, &iommu->domain_list, next) {
 		if (d->domain->ops == domain->domain->ops &&
 		    d->prot == domain->prot) {
-			iommu_detach_group(domain->domain, group->iommu_group);
-			if (!iommu_attach_group(d->domain,
-						group->iommu_group)) {
+			if (!iommu_group_replace_domain(group->iommu_group,
+							domain->domain,
+							d->domain)) {
 				list_add(&group->next, &d->group_list);
 				iommu_domain_free(domain->domain);
 				kfree(domain);
 				goto done;
 			}
-
-			ret = iommu_attach_group(domain->domain,
-						 group->iommu_group);
-			if (ret)
-				goto out_domain;
 		}
 	}
 
@@ -2327,7 +2323,7 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 	return 0;
 
 out_detach:
-	iommu_detach_group(domain->domain, group->iommu_group);
+	iommu_group_replace_domain(group->iommu_group, domain->domain, NULL);
 out_domain:
 	iommu_domain_free(domain->domain);
 	vfio_iommu_iova_free(&iova_copy);
@@ -2488,7 +2484,8 @@  static void vfio_iommu_type1_detach_group(void *iommu_data,
 		if (!group)
 			continue;
 
-		iommu_detach_group(domain->domain, group->iommu_group);
+		iommu_group_replace_domain(group->iommu_group,
+					   domain->domain, NULL);
 		update_dirty_scope = !group->pinned_page_dirty_scope;
 		list_del(&group->next);
 		kfree(group);
@@ -2577,7 +2574,8 @@  static void vfio_release_domain(struct vfio_domain *domain)
 
 	list_for_each_entry_safe(group, group_tmp,
 				 &domain->group_list, next) {
-		iommu_detach_group(domain->domain, group->iommu_group);
+		iommu_group_replace_domain(group->iommu_group,
+					   domain->domain, NULL);
 		list_del(&group->next);
 		kfree(group);
 	}