@@ -2646,6 +2646,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ case IOMMU_CAP_VIOMMU_HINT:
+ return amd_iommu_np_cache;
default:
break;
}
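(Annotation, not part of the diff: amd_iommu_np_cache means the IOMMU caches not-present translation entries, so the driver has to issue an invalidation even when a mapping is created. Physical AMD IOMMUs do not normally advertise this, while emulated ones do so that the guest's map operations become visible to them, which is what makes the flag usable as a VM hint. A rough sketch of where the flag is assumed to come from, paraphrasing the init path in amd_iommu_init.c and the NPCache capability bit; treat the exact form as an assumption:)

	/* Sketch only, assuming the usual init-time derivation. */
	if (iommu->cap & (1 << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;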
@@ -5094,12 +5094,32 @@ static inline bool nested_mode_support(void)
return ret;
}
+static inline bool caching_mode_supported(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = false;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (cap_caching_mode(iommu->cap)) {
+ ret = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return domain_update_iommu_snooping(NULL) == 1;
if (cap == IOMMU_CAP_INTR_REMAP)
return irq_remapping_enabled == 1;
+ if (cap == IOMMU_CAP_VIOMMU_HINT)
+ return caching_mode_supported();
return false;
}
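(Annotation, not part of the diff: VT-d's caching mode is the CM bit of the capability register; when set, software must invalidate the IOTLB even for not-present to present transitions. Real hardware is expected to leave CM clear, whereas emulated VT-d implementations such as QEMU's set it so that guest map operations trap to the host, so finding it on any active unit is a reasonable hint of running in a VM. cap_caching_mode() used above just extracts that bit; its assumed definition from the VT-d header is shown here for context only:)

	#define cap_caching_mode(c)	(((c) >> 7) & 1)	/* CAP_REG bit 7 (CM), assumed */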
@@ -931,7 +931,16 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(enum iommu_cap cap)
+{
+ if (cap == IOMMU_CAP_VIOMMU_HINT)
+ return true;
+
+ return false;
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.domain_free = viommu_domain_free,
.attach_dev = viommu_attach_dev,
@@ -94,6 +94,8 @@ enum iommu_cap {
transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+ IOMMU_CAP_VIOMMU_HINT, /* IOMMU can provide a hint that it is
+ running in a VM */
};
/*
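(Annotation, not part of the diff: with the capability defined, a consumer elsewhere in the kernel could query it through the existing iommu_capable() interface to tune its behaviour for an emulated IOMMU, for example batching unmap/invalidate work to reduce VM exits. A minimal sketch; the helper name is hypothetical:)

	#include <linux/iommu.h>
	#include <linux/pci.h>

	/* Hypothetical helper, not part of this series. */
	static bool running_behind_viommu(void)
	{
		return iommu_capable(&pci_bus_type, IOMMU_CAP_VIOMMU_HINT);
	}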