@@ -1887,6 +1887,171 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
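+/*
+ * Walk the "memory-region" phandles of @np and map each region 1:1
+ * (IOVA equal to physical address) into the identity domain. Returns
+ * the number of regions that were successfully mapped.
+ */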
+static int arm_smmu_identity_map_regions(struct arm_smmu_device *smmu,
+ struct device_node *np)
+{
+ struct device *dev = smmu->dev;
+ struct of_phandle_iterator it;
+ unsigned long page_size;
+ unsigned int count = 0;
+ int ret;
+
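+ /* use the smallest page size supported by the identity domain */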
+ page_size = 1UL << __ffs(smmu->identity->pgsize_bitmap);
+
+ /* parse memory regions and add them to the identity mapping */
+ of_for_each_phandle(&it, ret, np, "memory-region", NULL, 0) {
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ dma_addr_t start, limit, iova;
+ struct resource res;
+
+ ret = of_address_to_resource(it.node, 0, &res);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse memory region %pOF: %d\n",
+ it.node, ret);
+ continue;
+ }
+
+ /* check that region is not empty */
+ if (resource_size(&res) == 0) {
+ dev_dbg(dev, "skipping empty memory region %pOF\n",
+ it.node);
+ continue;
+ }
+
+ /* round outwards so that the whole region is covered */
+ start = ALIGN_DOWN(res.start, page_size);
+ limit = ALIGN(res.start + resource_size(&res), page_size);
+
+ for (iova = start; iova < limit; iova += page_size) {
+ phys_addr_t phys;
+
+ /* check that this IOVA isn't already mapped */
+ phys = iommu_iova_to_phys(smmu->identity, iova);
+ if (phys)
+ continue;
+
+ ret = iommu_map(smmu->identity, iova, iova, page_size,
+ prot);
+ if (ret < 0) {
+ dev_err(dev, "failed to map %pad for %pOF: %d\n",
+ &iova, it.node, ret);
+ continue;
+ }
+ }
+
+ dev_dbg(dev, "identity mapped memory region %pR\n", &res);
+ count++;
+ }
+
+ return count;
+}
+
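+/*
+ * Claim a stream mapping entry for the master described by @args and
+ * route it to the identity domain's context bank.
+ */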
+static int arm_smmu_identity_add_master(struct arm_smmu_device *smmu,
+ struct of_phandle_args *args)
+{
+ struct arm_smmu_domain *identity = to_smmu_domain(smmu->identity);
+ struct arm_smmu_smr *smrs = smmu->smrs;
+ struct device *dev = smmu->dev;
+ unsigned int index;
+ u16 sid, mask;
+ u32 fwid;
+ int ret;
+
+ /* skip masters that aren't ours */
+ if (args->np != dev->of_node)
+ return 0;
+
+ fwid = arm_smmu_of_parse(args->np, args->args, args->args_count);
+ sid = FIELD_GET(SMR_ID, fwid);
+ mask = FIELD_GET(SMR_MASK, fwid);
+
+ ret = arm_smmu_find_sme(smmu, sid, mask);
+ if (ret < 0) {
+ dev_err(dev, "failed to find SME: %d\n", ret);
+ return ret;
+ }
+
+ index = ret;
+
+ if (smrs && smmu->s2crs[index].count == 0) {
+ smrs[index].id = sid;
+ smrs[index].mask = mask;
+ smrs[index].valid = true;
+ }
+
+ smmu->s2crs[index].type = S2CR_TYPE_TRANS;
+ smmu->s2crs[index].privcfg = S2CR_PRIVCFG_DEFAULT;
+ smmu->s2crs[index].cbndx = identity->cfg.cbndx;
+ smmu->s2crs[index].count++;
+
+ return 0;
+}
+
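+/*
+ * Add all stream IDs listed in the "iommus" property of @np to the
+ * early identity mapping.
+ */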
+static int arm_smmu_identity_add_device(struct arm_smmu_device *smmu,
+ struct device_node *np)
+{
+ struct of_phandle_args args;
+ unsigned int index = 0;
+ int ret;
+
+ /* add stream IDs to the identity mapping */
+ while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+ index, &args)) {
+ ret = arm_smmu_identity_add_master(smmu, &args);
+
+ /* drop the reference taken by of_parse_phandle_with_args() */
+ of_node_put(args.np);
+ if (ret < 0)
+ return ret;
+
+ index++;
+ }
+
+ return 0;
+}
+
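+/*
+ * Create the identity domain and install 1:1 mappings for all masters
+ * that reference this SMMU through an "iommus" property and define at
+ * least one memory-region, so that DMA set up before the kernel took
+ * over (for example by the bootloader) keeps working.
+ */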
+static int arm_smmu_setup_identity(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_domain *identity;
+ struct device *dev = smmu->dev;
+ struct device_node *np;
+ int ret;
+
+ /* create early identity mapping */
+ smmu->identity = arm_smmu_domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+ if (!smmu->identity) {
+ dev_err(dev, "failed to create identity domain\n");
+ return -ENOMEM;
+ }
+
+ smmu->identity->pgsize_bitmap = smmu->pgsize_bitmap;
+ smmu->identity->type = IOMMU_DOMAIN_UNMANAGED;
+ smmu->identity->ops = &arm_smmu_ops;
+
+ ret = arm_smmu_init_domain_context(smmu->identity, smmu);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize identity domain: %d\n", ret);
+ return ret;
+ }
+
+ identity = to_smmu_domain(smmu->identity);
+
+ for_each_node_with_property(np, "iommus") {
+ /*
+ * Do not add devices to the early identity mapping if they
+ * do not define any memory-regions.
+ */
+ ret = arm_smmu_identity_map_regions(smmu, np);
+ if (ret <= 0)
+ continue;
+
+ ret = arm_smmu_identity_add_device(smmu, np);
+ if (ret < 0)
+ continue;
+ }
+
+ return 0;
+}
+
struct arm_smmu_match_data {
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -2127,6 +2292,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
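+
+ /* set up early identity mappings before the SMMU is enabled */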
+ err = arm_smmu_setup_identity(smmu);
+ if (err)
+ return err;
+
if (smmu->version == ARM_SMMU_V2) {
if (smmu->num_context_banks > smmu->num_context_irqs) {
dev_err(dev,
@@ -2169,8 +2338,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smmu);
- arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
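+
+ /*
+ * Reset the SMMU only after the SMR masks have been probed; the reset
+ * commits the software-defined stream mappings, including the early
+ * identity mappings set up above, to the hardware.
+ */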
+ arm_smmu_device_reset(smmu);
/*
* We want to avoid touching dev->power.lock in fastpaths unless
@@ -280,6 +280,8 @@ struct arm_smmu_device {
/* IOMMU core code handle */
struct iommu_device iommu;
+
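+ /* early identity domain used for boot-time DMA translations */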
+ struct iommu_domain *identity;
};
enum arm_smmu_context_fmt {