@@ -2580,6 +2580,147 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
return ret;
}
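+/*
+ * Split the block mappings that cover [iova, iova + size) into page
+ * mappings, so that dirty state can be tracked at page granularity.
+ * Requires the SMMU to support BBML1 or BBML2 and the io-pgtable to
+ * provide a split_block op.
+ */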
+static int arm_smmu_split_block(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ size_t handled_size;
+
+ if (!(smmu->features & (ARM_SMMU_FEAT_BBML1 | ARM_SMMU_FEAT_BBML2))) {
+ dev_err(smmu->dev, "BBML1/2 not supported, can't split block\n");
+ return -ENODEV;
+ }
+ if (!ops || !ops->split_block) {
+ pr_err("io-pgtable does not implement the split_block op\n");
+ return -ENODEV;
+ }
+
+ handled_size = ops->split_block(ops, iova, size);
+ if (handled_size != size) {
+ pr_err("split block failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
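+/*
+ * Merge a physically contiguous range back into larger mappings. The range
+ * [iova, iova + size) is handed to the io-pgtable merge_page op one
+ * supported page size at a time, as chosen by iommu_pgsize().
+ */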
+static int __arm_smmu_merge_page(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ size_t handled_size;
+
+ if (!ops || !ops->merge_page) {
+ pr_err("io-pgtable does not implement the merge_page op\n");
+ return -ENODEV;
+ }
+
+ while (size) {
+ size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+
+ handled_size = ops->merge_page(ops, iova, paddr, pgsize, prot);
+ if (handled_size != pgsize) {
+ pr_err("merge page failed\n");
+ return -EFAULT;
+ }
+
+ pr_debug("merge handled: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, pgsize);
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ return 0;
+}
+
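+/*
+ * Scan [iova, iova + size) with iova_to_phys() at PAGE_SIZE granularity to
+ * find physically contiguous spans, and merge each span back into larger
+ * mappings via __arm_smmu_merge_page(). Requires BBML1 or BBML2 support.
+ */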
+static int arm_smmu_merge_page(struct iommu_domain *domain, unsigned long iova,
+ size_t size, int prot)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ phys_addr_t phys, p;
+ dma_addr_t i;
+ size_t cont_size;
+ int ret = 0;
+
+ if (!(smmu->features & (ARM_SMMU_FEAT_BBML1 | ARM_SMMU_FEAT_BBML2))) {
+ dev_err(smmu->dev, "BBML1/2 not supported, can't merge page\n");
+ return -ENODEV;
+ }
+
+ if (!ops || !ops->iova_to_phys)
+ return -ENODEV;
+
+ while (size) {
+ phys = ops->iova_to_phys(ops, iova);
+ cont_size = PAGE_SIZE;
+ p = phys + cont_size;
+ i = iova + cont_size;
+
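+ /* Grow the span while the next page is physically contiguous. */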
+ while (cont_size < size && p == ops->iova_to_phys(ops, i)) {
+ p += PAGE_SIZE;
+ i += PAGE_SIZE;
+ cont_size += PAGE_SIZE;
+ }
+
+ if (cont_size != PAGE_SIZE) {
+ ret = __arm_smmu_merge_page(domain, iova, phys,
+ cont_size, prot);
+ if (ret)
+ break;
+ }
+
+ iova += cont_size;
+ size -= cont_size;
+ }
+
+ return ret;
+}
+
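+/*
+ * With HTTU HD the hardware dirty state tracking is always active, so
+ * starting dirty logging only requires splitting block mappings (to get
+ * page-granularity dirty bits), and stopping it merges the pages back.
+ */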
+static int arm_smmu_switch_dirty_log(struct iommu_domain *domain, bool enable,
+ unsigned long iova, size_t size, int prot)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (!(smmu->features & ARM_SMMU_FEAT_HD))
+ return -ENODEV;
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return -EINVAL;
+
+ if (enable) {
+ /*
+ * For SMMU, hardware dirty state management is always enabled when
+ * the hardware supports HTTU HD. The action to start dirty logging
+ * is splitting block mappings.
+ *
+ * We don't return an error even if the split operation fails, as we
+ * can still track dirty state at block granularity, which is still a
+ * much better choice than a full-dirty policy.
+ */
+ arm_smmu_split_block(domain, iova, size);
+ } else {
+ /*
+ * For SMMU, hardware dirty state management is always enabled when
+ * the hardware supports HTTU HD. The action to stop dirty logging
+ * is merging page mappings back into blocks.
+ *
+ * We don't return an error even if the merge operation fails, as it
+ * only affects the performance of DMA transactions.
+ */
+ arm_smmu_merge_page(domain, iova, size, prot);
+ }
+
+ return 0;
+}
+
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
@@ -2678,6 +2819,7 @@ static struct iommu_ops arm_smmu_ops = {
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
+ .switch_dirty_log = arm_smmu_switch_dirty_log,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2375,8 +2375,8 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
-static size_t iommu_pgsize(struct iommu_domain *domain,
- unsigned long addr_merge, size_t size)
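+/*
+ * iommu_pgsize() picks the largest page size supported by the domain that
+ * fits the alignment of @addr_merge and the remaining @size. It is made
+ * non-static and exported so that IOMMU drivers such as SMMUv3 can reuse
+ * it when merging page mappings.
+ */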
+size_t iommu_pgsize(struct iommu_domain *domain,
+ unsigned long addr_merge, size_t size)
{
unsigned int pgsize_idx;
size_t pgsize;
@@ -2406,6 +2406,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
+EXPORT_SYMBOL_GPL(iommu_pgsize);
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
@@ -427,6 +427,8 @@ extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
+extern size_t iommu_pgsize(struct iommu_domain *domain,
+ unsigned long addr_merge, size_t size);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,