@@ -256,6 +256,9 @@ struct iommu_group
atomic_t ref;
};

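+/* Forward declaration, needed by the generic add_device callback. */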
+static struct arm_smmu_device *find_smmu(const struct device *dev);
+
static struct iommu_group *iommu_group_alloc(void)
{
struct iommu_group *group = xzalloc(struct iommu_group);
@@ -444,6 +447,9 @@ static struct iommu_group *iommu_group_get(struct device *dev)
#define SMR_VALID (1U << 31)
#define SMR_MASK_SHIFT 16
#define SMR_ID_SHIFT 0
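+/* The SMR stream ID and mask are each 15-bit fields. */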
+#define SMR_ID_MASK 0x7fff
+#define SMR_MASK_MASK 0x7fff

#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
@@ -872,6 +878,47 @@ static int register_smmu_master(struct arm_smmu_device *smmu,
fwspec);
}

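+/*
+ * Generic add_device callback: look up the SMMU instance recorded in the
+ * device's fwspec and register the master through the legacy path.
+ */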
+static int arm_smmu_dt_add_device_generic(u8 devfn, struct device *dev)
+{
+ struct arm_smmu_device *smmu;
+ struct iommu_fwspec *fwspec;
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec == NULL)
+ return -ENXIO;
+
+ smmu = find_smmu(fwspec->iommu_dev);
+ if (smmu == NULL)
+ return -ENXIO;
+
+ return arm_smmu_dt_add_device_legacy(smmu, dev, fwspec);
+}
+
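+/*
+ * Generic dt_xlate callback: translate a raw "iommus" specifier into a
+ * firmware ID. Cell 0 of the specifier carries the stream ID.
+ */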
+static int arm_smmu_dt_xlate_generic(struct device *dev,
+ const struct dt_phandle_args *spec)
+{
+ uint32_t mask, fwid = 0;
+
+ if (spec->args_count > 0)
+ fwid |= (SMR_ID_MASK & spec->args[0]) << SMR_ID_SHIFT;
+
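+ /* The SMR mask comes from cell 1 or from "stream-match-mask". */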
+ if (spec->args_count > 1)
+ fwid |= (SMR_MASK_MASK & spec->args[1]) << SMR_MASK_SHIFT;
+ else if (!of_property_read_u32(spec->np, "stream-match-mask", &mask))
+ fwid |= (SMR_MASK_MASK & mask) << SMR_MASK_SHIFT;
+
+ return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
+
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
struct arm_smmu_device *smmu;
@@ -2837,6 +2884,7 @@ static void arm_smmu_iommu_domain_teardown(struct domain *d)
static const struct iommu_ops arm_smmu_iommu_ops = {
.init = arm_smmu_iommu_domain_init,
.hwdom_init = arm_smmu_iommu_hwdom_init,
+ .add_device = arm_smmu_dt_add_device_generic,
.teardown = arm_smmu_iommu_domain_teardown,
.iotlb_flush = arm_smmu_iotlb_flush,
.iotlb_flush_all = arm_smmu_iotlb_flush_all,
@@ -2844,9 +2892,10 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
.reassign_device = arm_smmu_reassign_dev,
.map_page = arm_iommu_map_page,
.unmap_page = arm_iommu_unmap_page,
+ .dt_xlate = arm_smmu_dt_xlate_generic,
};

-static __init const struct arm_smmu_device *find_smmu(const struct device *dev)
+static struct arm_smmu_device *find_smmu(const struct device *dev)
{
struct arm_smmu_device *smmu;
bool found = false;
@@ -159,22 +159,7 @@ int iommu_add_dt_device(struct dt_device_node *np)
* these callbacks implemented.
*/
if ( !ops->add_device || !ops->dt_xlate )
- {
- /*
- * Some Device Trees may expose both legacy SMMU and generic
- * IOMMU bindings together. However, the SMMU driver is only
- * supporting the former and will protect them during the
- * initialization. So we need to skip them and not return
- * error here.
- *
- * XXX: This can be dropped when the SMMU is able to deal
- * with generic bindings.
- */
- if ( dt_device_is_protected(np) )
- return 0;
- else
- return -EINVAL;
- }
+ return -EINVAL;

if ( !dt_device_is_available(iommu_spec.np) )
break;