@@ -82,6 +82,23 @@ struct vfio_group {
 	struct list_head next;
 };
 
+static int vfio_domains_have_iommu_cap(struct vfio_iommu *iommu, int cap)
+{
+	struct vfio_domain *domain;
+	int ret = 1;
+
+	mutex_lock(&iommu->lock);
+	list_for_each_entry(domain, &iommu->domain_list, next) {
+		if (!(domain->caps & cap)) {
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&iommu->lock);
+
+	return ret;
+}
+
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -948,23 +965,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
 	kfree(iommu);
 }
 
-static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
-{
-	struct vfio_domain *domain;
-	int ret = 1;
-
-	mutex_lock(&iommu->lock);
-	list_for_each_entry(domain, &iommu->domain_list, next) {
-		if (!(domain->caps & IOMMU_CAP_CACHE_COHERENCY)) {
-			ret = 0;
-			break;
-		}
-	}
-	mutex_unlock(&iommu->lock);
-
-	return ret;
-}
-
 static long vfio_iommu_type1_ioctl(void *iommu_data,
 				   unsigned int cmd, unsigned long arg)
 {
@@ -980,7 +980,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 		case VFIO_DMA_CC_IOMMU:
 			if (!iommu)
 				return 0;
-			return vfio_domains_have_iommu_cache(iommu);
+			return vfio_domains_have_iommu_cap(iommu,
+						IOMMU_CAP_CACHE_COHERENCY);
 		default:
 			return 0;
 		}
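
For illustration only, a minimal sketch (not part of the patch) of how a hypothetical future caller could reuse the generalized helper for a different capability bit; it assumes domain->caps is populated with IOMMU_CAP_INTR_REMAP the same way it is with IOMMU_CAP_CACHE_COHERENCY:

/*
 * Hypothetical caller (illustration only): answer an interrupt-remapping
 * query by reusing the generalized capability walk.  Assumes each
 * vfio_domain records IOMMU_CAP_INTR_REMAP in domain->caps just as it
 * records IOMMU_CAP_CACHE_COHERENCY.
 */
static long vfio_check_intr_remapping(struct vfio_iommu *iommu)
{
	if (!iommu)
		return 0;
	return vfio_domains_have_iommu_cap(iommu, IOMMU_CAP_INTR_REMAP);
}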