new file mode 100644
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 - 2023 Intel Corporation
+
+#include <linux/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/iova.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ipu6.h"
+#include "ipu6-bus.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+
+struct vm_info {
+ struct list_head list;
+ struct page **pages;
+ dma_addr_t ipu6_iova;
+ void *vaddr;
+ unsigned long size;
+};
+
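+/*
+ * Look up the vm_info entry whose IPU6 IOVA range contains @iova.
+ * Returns NULL if the address does not belong to any tracked allocation.
+ */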
+static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
+{
+ struct vm_info *info, *save;
+
+ list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+ if (iova >= info->ipu6_iova &&
+ iova < (info->ipu6_iova + info->size))
+ return info;
+ }
+
+ return NULL;
+}
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+ unsigned long attrs)
+{
+ void *ptr;
+
+ if (!page)
+ return;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ memset(ptr, 0, size);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, size);
+}
+
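+/*
+ * Allocate an array of individually freeable pages backing @size bytes.
+ * High-order allocations are tried first and split into order-0 pages,
+ * falling back to smaller orders when memory is fragmented.
+ */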
+static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp,
+ unsigned long attrs)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ pages = kvzalloc(array_size, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_NOWARN;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (j--)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ kvfree(pages);
+ return NULL;
+}
+
+static int __dma_free_buffer(struct device *dev, struct page **pages,
+ size_t size,
+ unsigned long attrs)
+{
+ int count = PHYS_PFN(size);
+ unsigned int i;
+
+ for (i = 0; i < count && pages[i]; i++) {
+ __dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+ __free_pages(pages[i], 0);
+ }
+
+ kvfree(pages);
+ return 0;
+}
+
+static void ipu6_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ void *vaddr;
+ u32 offset;
+ struct vm_info *info;
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ offset = dma_handle - info->ipu6_iova;
+ if (WARN_ON(size > (info->size - offset)))
+ return;
+
+ vaddr = info->vaddr + offset;
+ clflush_cache_range(vaddr, size);
+}
+
+static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+}
+
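+/*
+ * Allocate a buffer visible to the IPU6: reserve an IOVA range in the
+ * IPU6 MMU address space, allocate backing pages, map each page through
+ * the parent PCI device and then into the IPU6 MMU, and vmap the pages
+ * for CPU access. The allocation is tracked on mmu->vma_list.
+ */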
+static void *ipu6_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ dma_addr_t pci_dma_addr, ipu6_iova;
+ struct vm_info *info;
+ unsigned long count;
+ struct page **pages;
+ struct iova *iova;
+ unsigned int i;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+
+ iova = alloc_iova(&mmu->dmap->iovad, count,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova)
+ goto out_kfree;
+
+ pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ goto out_free_iova;
+
+ dev_dbg(dev, "dma_alloc: iova low pfn %lu, high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+ for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+ pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
+ &pci_dma_addr);
+ if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
+ dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
+ goto out_unmap;
+ }
+
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info,
+ (iova->pfn_lo + i) << PAGE_SHIFT,
+ pci_dma_addr, PAGE_SIZE);
+ if (ret) {
+ dev_err(dev, "ipu6_mmu_map for pci_dma[%d] %pad failed",
+ i, &pci_dma_addr);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
+ goto out_unmap;
+ }
+ }
+
+ info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!info->vaddr)
+ goto out_unmap;
+
+ *dma_handle = iova->pfn_lo << PAGE_SHIFT;
+
+ info->pages = pages;
+ info->ipu6_iova = *dma_handle;
+ info->size = size;
+ list_add(&info->list, &mmu->vma_list);
+
+ return info->vaddr;
+
+out_unmap:
+ while (i--) {
+ ipu6_iova = (iova->pfn_lo + i) << PAGE_SHIFT;
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu6_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
+ }
+
+ __dma_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
+ kfree(info);
+
+ return NULL;
+}
+
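+/*
+ * Tear down a buffer from ipu6_dma_alloc(): unmap every page from both
+ * the IPU6 MMU and the parent PCI device, free the backing pages,
+ * invalidate the TLB and release the IOVA range.
+ */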
+static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ dma_handle >> PAGE_SHIFT);
+ dma_addr_t pci_dma_addr, ipu6_iova;
+ struct vm_info *info;
+ struct page **pages;
+ unsigned int i;
+
+ if (WARN_ON(!iova))
+ return;
+
+ info = get_vm_info(mmu, dma_handle);
+ if (WARN_ON(!info))
+ return;
+
+ if (WARN_ON(!info->vaddr))
+ return;
+
+ if (WARN_ON(!info->pages))
+ return;
+
+ list_del(&info->list);
+
+ size = PAGE_ALIGN(size);
+
+ pages = info->pages;
+
+ vunmap(vaddr);
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ipu6_iova = (iova->pfn_lo + i) << PAGE_SHIFT;
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ ipu6_iova);
+ dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+ }
+
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ iova_size(iova) << PAGE_SHIFT);
+
+ __dma_free_buffer(dev, pages, size, attrs);
+
+ mmu->tlb_invalidate(mmu);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+
+ kfree(info);
+}
+
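+/* Map a buffer from ipu6_dma_alloc() into user space, page by page. */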
+static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_info *info;
+ size_t i;
+
+ info = get_vm_info(mmu, iova);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (size > info->size)
+ return -EFAULT;
+
+ for (i = 0; i < count; i++)
+ vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
+ info->pages[i]);
+
+ return 0;
+}
+
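+/*
+ * Undo ipu6_dma_map_sg(): restore the PCI DMA address in each sg entry,
+ * unmap the IOVA range from the IPU6 MMU, invalidate the TLB and
+ * release the PCI DMA mapping and the IOVA.
+ */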
+static void ipu6_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ sg_dma_address(sglist) >> PAGE_SHIFT);
+ int i, npages, count;
+ struct scatterlist *sg;
+ dma_addr_t pci_dma_addr;
+
+ if (!nents)
+ return;
+
+ if (WARN_ON(!iova))
+ return;
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ /* count the sg entries that actually carry a valid DMA mapping */
+ count = 0;
+ npages = iova_size(iova);
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg_dma_len(sg) == 0 ||
+ sg_dma_address(sg) == DMA_MAPPING_ERROR)
+ break;
+
+ npages -= PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT;
+ count++;
+ if (npages <= 0)
+ break;
+ }
+
+ /*
+ * Before the IPU6 MMU unmap, write the PCI DMA address back into
+ * each sg entry. The mapped nents may be less than orig_nents
+ * because the smallest mapping granule is a single 4 KiB page.
+ */
+ dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
+ for_each_sg(sglist, sg, count, i) {
+ dev_dbg(dev, "ipu6_unmap sg[%d] %pad\n",
+ i, &sg_dma_address(sg));
+ pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ sg_dma_address(sg));
+ dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
+ &pci_dma_addr, i);
+ sg_dma_address(sg) = pci_dma_addr;
+ }
+
+ dev_dbg(dev, "ipu6_mmu_unmap low pfn %lu high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ iova_size(iova) << PAGE_SHIFT);
+
+ mmu->tlb_invalidate(mmu);
+
+ dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+}
+
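+/*
+ * Map a scatterlist for IPU6 DMA: map it through the parent PCI device
+ * first, then map the resulting PCI DMA addresses into one contiguous
+ * IPU6 IOVA range and rewrite sg_dma_address() of each entry to the
+ * IPU6 IOVA used by the firmware.
+ */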
+static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t npages = 0;
+ u32 iova_addr;
+ int i, count;
+
+ dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
+ count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+ if (count <= 0) {
+ dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
+ return 0;
+ }
+
+ dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);
+
+ for_each_sg(sglist, sg, count, i)
+ npages += PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT;
+
+ iova = alloc_iova(&mmu->dmap->iovad, npages,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova) {
+ dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
+ return 0;
+ }
+
+ dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+ iova->pfn_hi);
+
+ iova_addr = iova->pfn_lo;
+ for_each_sg(sglist, sg, count, i) {
+ int ret;
+
+ dev_dbg(dev, "mapping entry %d: iova 0x%lx phy %pad size %d\n",
+ i, (unsigned long)iova_addr << PAGE_SHIFT,
+ &sg_dma_address(sg), sg_dma_len(sg));
+
+ dev_dbg(dev, "mapping entry %d: sg->length = %d\n", i,
+ sg->length);
+
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info,
+ iova_addr << PAGE_SHIFT,
+ sg_dma_address(sg),
+ PAGE_ALIGN(sg_dma_len(sg)));
+ if (ret)
+ goto out_fail;
+
+ sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
+
+ iova_addr += PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT;
+ }
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ return count;
+
+out_fail:
+ ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);
+
+ return 0;
+}
+
+/*
+ * Create scatter-list for the already allocated DMA buffer
+ */
+static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+ struct vm_info *info;
+ int n_pages;
+ int ret = 0;
+
+ info = get_vm_info(mmu, handle);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (WARN_ON(!info->pages))
+ return -ENOMEM;
+
+ n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+ GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "IPU6 get sgt table failed\n");
+
+ return ret;
+}
+
+const struct dma_map_ops ipu6_dma_ops = {
+ .alloc = ipu6_dma_alloc,
+ .free = ipu6_dma_free,
+ .mmap = ipu6_dma_mmap,
+ .map_sg = ipu6_dma_map_sg,
+ .unmap_sg = ipu6_dma_unmap_sg,
+ .sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
+ .sync_single_for_device = ipu6_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
+ .get_sgtable = ipu6_dma_get_sgtable,
+};
new file mode 100644
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_DMA_H
+#define IPU6_DMA_H
+
+#include <linux/dma-map-ops.h>
+#include <linux/iova.h>
+#include <linux/kref.h>
+
+struct ipu6_mmu_info;
+
+struct ipu6_dma_mapping {
+ struct ipu6_mmu_info *mmu_info;
+ struct iova_domain iovad;
+ struct kref ref;
+};
+
+extern const struct dma_map_ops ipu6_dma_ops;
+
+#endif /* IPU6_DMA_H */
new file mode 100644
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013 - 2023 Intel Corporation
+
+#include <linux/cacheflush.h>
+#include <linux/device.h>
+#include <linux/iova.h>
+#include <linux/sizes.h>
+
+#include "ipu6.h"
+#include "ipu6-buttress.h"
+#include "ipu6-dma.h"
+#include "ipu6-mmu.h"
+#include "ipu6-platform.h"
+#include "ipu6-platform-regs.h"
+
+#define ISP_PAGE_SHIFT 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_SHIFT 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PADDR_SHIFT 12
+
+#define REG_TLB_INVALIDATE 0x0000
+
+#define REG_L1_PHYS 0x0004 /* 27-bit pfn */
+#define REG_INFO 0x0008
+
+#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+
+static void tlb_invalidate(struct ipu6_mmu *mmu)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ if (!mmu->ready) {
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ /*
+ * To avoid a deadlock caused by a HW bug in some of the IPU6
+ * MMUs on successive invalidate calls, first do a read of the
+ * page table base register before writing the invalidate
+ * register. MMUs that need this workaround have the
+ * insert_read_before_invalidate flag set to true. The return
+ * value of the read is disregarded.
+ */
+ if (mmu->mmu_hw[i].insert_read_before_invalidate)
+ readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ writel(0xffffffff, mmu->mmu_hw[i].base +
+ REG_TLB_INVALIDATE);
+ /*
+ * The TLB invalidation is a "single cycle" operation (in IOMMU
+ * clock cycles). The wmb() makes sure the MMIO write has reached
+ * the IPU6 TLB invalidate register before the CPU attempts to
+ * update the IOMMU page tables again.
+ */
+ wmb();
+ }
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+#ifdef DEBUG
+static void page_table_dump(struct ipu6_mmu_info *mmu_info)
+{
+ u32 l1_idx;
+
+ dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ u32 l2_idx;
+ u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
+ continue;
+ dev_dbg(mmu_info->dev,
+ "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %p\n",
+ l1_idx, iova, iova + ISP_PAGE_SIZE,
+ (void *)TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
+
+ for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ u32 *l2_pt = mmu_info->l2_pts[l1_idx];
+ u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
+
+ if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
+ continue;
+
+ dev_dbg(mmu_info->dev,
+ "\tl2 entry %u; iova 0x%8.8x, phys %p\n",
+ l2_idx, iova2,
+ (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ }
+ }
+
+ dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
+}
+#endif /* DEBUG */
+
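+/* Map a single page table page for DMA; returns 0 on mapping error. */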
+static dma_addr_t map_single(struct ipu6_mmu_info *mmu_info, void *ptr)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu_info->dev, dma))
+ return 0;
+
+ return dma;
+}
+
+static int get_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+ void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map dummy page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->dummy_page = pt;
+ mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu6_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ goto err_free_page;
+ }
+
+ for (i = 0; i < ISP_L2PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ mmu_info->dummy_l2_pt = pt;
+ mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ dma_addr_t dma;
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_l2_pteval;
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+ dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+
+ return pt;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ unsigned int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ return pt;
+}
+
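+/*
+ * Install a single 4 KiB mapping from @iova to @paddr. If the L1 entry
+ * still points at the dummy L2 table, a real L2 page table is allocated
+ * and hooked up first. Updated PTEs are flushed from the CPU cache.
+ */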
+static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 iova_start = iova;
+ u32 *l2_pt, *l2_virt;
+ unsigned int l2_idx;
+ unsigned long flags;
+ dma_addr_t dma;
+ u32 l1_entry;
+
+ dev_dbg(mmu_info->dev,
+ "mapping l2 page table for l1 index %u (iova %8.8x)\n",
+ l1_idx, (u32)iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l1_entry = mmu_info->l1_pt[l1_idx];
+ if (l1_entry == mmu_info->dummy_l2_pteval) {
+ l2_virt = mmu_info->l2_pts[l1_idx];
+ if (likely(!l2_virt)) {
+ l2_virt = alloc_l2_pt(mmu_info);
+ if (!l2_virt) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ dma = map_single(mmu_info, l2_virt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ free_page((unsigned long)l2_virt);
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l1_entry = dma >> ISP_PADDR_SHIFT;
+
+ dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
+ l1_idx, l2_virt);
+ mmu_info->l1_pt[l1_idx] = l1_entry;
+ mmu_info->l2_pts[l1_idx] = l2_virt;
+ clflush_cache_range(&mmu_info->l1_pt[l1_idx],
+ sizeof(mmu_info->l1_pt[l1_idx]));
+ }
+
+ l2_pt = mmu_info->l2_pts[l1_idx];
+
+ dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);
+
+ paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+
+ l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+
+ dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+ if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+ clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+
+ return 0;
+}
+
+static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+ u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+ dev_dbg(mmu_info->dev,
+ "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+ iova_start, iova_end, size, paddr);
+
+ return l2_map(mmu_info, iova_start, paddr, size);
+}
+
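+/*
+ * Clear the L2 PTEs covering [@iova, @iova + @size) within a single L1
+ * entry and flush them from the CPU cache. Returns the number of bytes
+ * actually unmapped.
+ */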
+static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t dummy, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 iova_start = iova;
+ unsigned int l2_idx;
+ size_t unmapped = 0;
+ unsigned long flags;
+ u32 *l2_pt;
+
+ dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+ l1_idx, iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ dev_err(mmu_info->dev,
+ "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+ iova, l1_idx);
+ return 0;
+ }
+
+ for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+ < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ l2_pt = mmu_info->l2_pts[l1_idx];
+ dev_dbg(mmu_info->dev,
+ "unmap l2 index %u with pteval 0x%10.10llx\n",
+ l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+ clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+ unmapped++;
+ }
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return unmapped << ISP_PAGE_SHIFT;
+}
+
+static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
+ unsigned long iova, size_t size)
+{
+ return l2_unmap(mmu_info, iova, 0, size);
+}
+
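+/*
+ * Reserve an IOVA range of IPU6_MMUV2_TRASH_RANGE bytes and map all of
+ * it to the single physical trash page reserved at probe time, so that
+ * stray IPU6 DMA has a harmless destination.
+ */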
+static int allocate_trash_buffer(struct ipu6_mmu *mmu)
+{
+ unsigned int n_pages = PAGE_ALIGN(IPU6_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
+ struct iova *iova;
+ unsigned int i;
+ dma_addr_t dma;
+ u32 iova_addr;
+ int ret;
+
+ /* Allocate 8MB in iova range */
+ iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+ mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
+ if (!iova) {
+ dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+ dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+ ret = -ENOMEM;
+ goto out_free_iova;
+ }
+
+ mmu->pci_trash_page = dma;
+
+ /*
+ * Map the 8 MB IOVA range to the single physical trash page
+ * mmu->trash_page, which was already reserved at probe time.
+ */
+ iova_addr = iova->pfn_lo;
+ for (i = 0; i < n_pages; i++) {
+ ret = ipu6_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
+ mmu->pci_trash_page, PAGE_SIZE);
+ if (ret) {
+ dev_err(mmu->dev,
+ "mapping trash buffer range failed\n");
+ goto out_unmap;
+ }
+
+ iova_addr++;
+ }
+
+ mmu->iova_trash_page = iova->pfn_lo << PAGE_SHIFT;
+ dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+ mmu->mmid, (unsigned int)mmu->iova_trash_page);
+ return 0;
+
+out_unmap:
+ ipu6_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+ dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+ return ret;
+}
+
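+/*
+ * Program each MMU HW block: write the L1 page table base and the info
+ * bits, configure the per-stream L1/L2 TLB block addresses, set up the
+ * trash buffer on first use and mark the MMU ready for TLB invalidation.
+ */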
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
+{
+ struct ipu6_mmu_info *mmu_info;
+ unsigned long flags;
+ unsigned int i;
+
+ mmu_info = mmu->dmap->mmu_info;
+
+ /* Initialise each MMU HW block */
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+ unsigned int j;
+ u16 block_addr;
+
+ /* Write page table address per MMU */
+ writel((phys_addr_t)mmu_info->l1_pt_dma,
+ mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ /* Set info bits per MMU */
+ writel(mmu->mmu_hw[i].info_bits,
+ mmu->mmu_hw[i].base + REG_INFO);
+
+ /* Configure MMU TLB stream configuration for L1 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+ block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+ if (block_addr > IPU6_MAX_LI_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L1 configuration\n");
+ return -EINVAL;
+ }
+
+ /* Write the block start address for each stream */
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l1_stream_id_reg_offset + 4 * j);
+ }
+
+ /* Configure MMU TLB stream configuration for L2 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+ block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+ if (block_addr > IPU6_MAX_L2_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L2 configuration\n");
+ return -EINVAL;
+ }
+
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l2_stream_id_reg_offset + 4 * j);
+ }
+ }
+
+ if (!mmu->trash_page) {
+ int ret;
+
+ mmu->trash_page = alloc_page(GFP_KERNEL);
+ if (!mmu->trash_page) {
+ dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = allocate_trash_buffer(mmu);
+ if (ret) {
+ __free_page(mmu->trash_page);
+ mmu->trash_page = NULL;
+ dev_err(mmu->dev, "trash buffer allocation failed\n");
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = true;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_init, INTEL_IPU6);
+
+static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
+{
+ struct ipu6_mmu_info *mmu_info;
+ int ret;
+
+ mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+ if (!mmu_info)
+ return NULL;
+
+ mmu_info->aperture_start = 0;
+ mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+ IPU6_MMU_ADDR_BITS :
+ IPU6_MMU_ADDR_BITS_NON_SECURE);
+ mmu_info->pgsize_bitmap = SZ_4K;
+ mmu_info->dev = &isp->pdev->dev;
+
+ ret = get_dummy_page(mmu_info);
+ if (ret)
+ goto err_free_info;
+
+ ret = alloc_dummy_l2_pt(mmu_info);
+ if (ret)
+ goto err_free_dummy_page;
+
+ mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
+ if (!mmu_info->l2_pts)
+ goto err_free_dummy_l2_pt;
+
+ /*
+ * The L1 page table is always allocated and mapped; like the L2
+ * page tables, it occupies a single page.
+ */
+ mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+ if (!mmu_info->l1_pt)
+ goto err_free_l2_pts;
+
+ spin_lock_init(&mmu_info->lock);
+
+ dev_dbg(mmu_info->dev, "domain initialised\n");
+
+ return mmu_info;
+
+err_free_l2_pts:
+ vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+ free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+ free_dummy_page(mmu_info);
+err_free_info:
+ kfree(mmu_info);
+
+ return NULL;
+}
+
+int ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = false;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_mmu_hw_cleanup, INTEL_IPU6);
+
+static struct ipu6_dma_mapping *alloc_dma_mapping(struct ipu6_device *isp)
+{
+ struct ipu6_dma_mapping *dmap;
+
+ dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+ if (!dmap)
+ return NULL;
+
+ dmap->mmu_info = ipu6_mmu_alloc(isp);
+ if (!dmap->mmu_info) {
+ kfree(dmap);
+ return NULL;
+ }
+ init_iova_domain(&dmap->iovad, SZ_4K, 1);
+ dmap->mmu_info->dmap = dmap;
+
+ kref_init(&dmap->ref);
+
+ dev_dbg(&isp->pdev->dev, "alloc mapping\n");
+
+ iova_cache_get();
+
+ return dmap;
+}
+
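+/* Walk the page tables to translate an IPU6 IOVA into a physical address. */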
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+ dma_addr_t iova)
+{
+ phys_addr_t phy_addr;
+ unsigned long flags;
+ u32 *l2_pt;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+ phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+ phy_addr <<= ISP_PAGE_SHIFT;
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return phy_addr;
+}
+
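+/*
+ * Pick the largest page size from @pgsize_bitmap that fits within
+ * @size and matches the alignment of @addr_merge (iova | paddr).
+ */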
+static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
+ unsigned long addr_merge, size_t size)
+{
+ unsigned int pgsize_idx;
+ size_t pgsize;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ if (likely(addr_merge)) {
+ /* Max page size allowed by address */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+ pgsize &= pgsize_bitmap;
+
+ WARN_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ return pgsize;
+}
+
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * The virtual address and the size of the mapping must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
+ return 0;
+ }
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova, size - unmapped);
+
+ unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+
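+/*
+ * Map [@iova, @iova + @size) to @paddr in supported page size chunks,
+ * rolling back the partial mapping if any chunk fails.
+ */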
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (mmu_info->pgsize_bitmap == 0UL)
+ return -ENODEV;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ while (size) {
+ size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova | paddr, size);
+
+ dev_dbg(mmu_info->dev,
+ "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+ iova, &paddr, pgsize);
+
+ ret = __ipu6_mmu_map(mmu_info, iova, paddr, pgsize);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ ipu6_mmu_unmap(mmu_info, orig_iova, orig_size - size);
+
+ return ret;
+}
+
+static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
+{
+ struct ipu6_dma_mapping *dmap = mmu->dmap;
+ struct ipu6_mmu_info *mmu_info = dmap->mmu_info;
+ struct iova *iova;
+ u32 l1_idx;
+
+ if (mmu->iova_trash_page) {
+ iova = find_iova(&dmap->iovad,
+ mmu->iova_trash_page >> PAGE_SHIFT);
+ if (iova) {
+ /* unmap and free the trash buffer iova */
+ ipu6_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) <<
+ PAGE_SHIFT);
+ __free_iova(&dmap->iovad, iova);
+ } else {
+ dev_err(mmu->dev, "trash buffer iova not found.\n");
+ }
+
+ mmu->iova_trash_page = 0;
+ dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ mmu->pci_trash_page = 0;
+ __free_page(mmu->trash_page);
+ }
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+ }
+ }
+
+ free_dummy_page(mmu_info);
+ dma_unmap_single(mmu_info->dev, mmu_info->l1_pt_dma << ISP_PADDR_SHIFT,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+ free_page((unsigned long)mmu_info->l1_pt);
+ kfree(mmu_info);
+}
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu6_hw_variants *hw)
+{
+ struct ipu6_device *isp = pci_get_drvdata(to_pci_dev(dev));
+ struct ipu6_mmu_pdata *pdata;
+ struct ipu6_mmu *mmu;
+ unsigned int i;
+
+ if (hw->nr_mmus > IPU6_MMU_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < hw->nr_mmus; i++) {
+ struct ipu6_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+ const struct ipu6_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+ if (src_mmu->nr_l1streams > IPU6_MMU_MAX_TLB_L1_STREAMS ||
+ src_mmu->nr_l2streams > IPU6_MMU_MAX_TLB_L2_STREAMS)
+ return ERR_PTR(-EINVAL);
+
+ *pdata_mmu = *src_mmu;
+ pdata_mmu->base = base + src_mmu->offset;
+ }
+
+ mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->mmid = mmid;
+ mmu->mmu_hw = pdata->mmu_hw;
+ mmu->nr_mmus = hw->nr_mmus;
+ mmu->tlb_invalidate = tlb_invalidate;
+ mmu->ready = false;
+ INIT_LIST_HEAD(&mmu->vma_list);
+ spin_lock_init(&mmu->ready_lock);
+
+ mmu->dmap = alloc_dma_mapping(isp);
+ if (!mmu->dmap) {
+ dev_err(dev, "can't alloc dma mapping\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mmu;
+}
+
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
+{
+ struct ipu6_dma_mapping *dmap = mmu->dmap;
+
+ ipu6_mmu_destroy(mmu);
+ mmu->dmap = NULL;
+ iova_cache_put();
+ put_iova_domain(&dmap->iovad);
+ kfree(dmap);
+}
new file mode 100644
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2023 Intel Corporation */
+
+#ifndef IPU6_MMU_H
+#define IPU6_MMU_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define ISYS_MMID 1
+#define PSYS_MMID 0
+
+struct device;
+struct ipu6_hw_variants;
+struct ipu6_mmu_hw;
+struct page;
+
+struct ipu6_mmu_info {
+ struct device *dev;
+
+ u32 __iomem *l1_pt;
+ u32 l1_pt_dma;
+ u32 **l2_pts;
+
+ u32 *dummy_l2_pt;
+ u32 dummy_l2_pteval;
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
+
+ spinlock_t lock; /* Serialize access to users */
+ struct ipu6_dma_mapping *dmap;
+};
+
+struct ipu6_mmu {
+ struct list_head node;
+
+ struct ipu6_mmu_hw *mmu_hw;
+ unsigned int nr_mmus;
+ unsigned int mmid;
+
+ phys_addr_t pgtbl;
+ struct device *dev;
+
+ struct ipu6_dma_mapping *dmap;
+ struct list_head vma_list;
+
+ struct page *trash_page;
+ dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+ dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */
+
+ bool ready;
+ spinlock_t ready_lock; /* Serialize access to bool ready */
+
+ void (*tlb_invalidate)(struct ipu6_mmu *mmu);
+};
+
+struct ipu6_mmu *ipu6_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu6_hw_variants *hw);
+void ipu6_mmu_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_hw_init(struct ipu6_mmu *mmu);
+int ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu);
+int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+ size_t size);
+phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
+ dma_addr_t iova);
+#endif /* IPU6_MMU_H */