Message ID | 1729835552-14825-3-git-send-email-bingbu.cao@intel.com (mailing list archive)
---|---
State | New
Series | [v2,1/3] media: ipu6: move the l2_unmap() up before l2_map()
Hi Bingbu,

On Fri, Oct 25, 2024 at 01:52:32PM +0800, Bingbu Cao wrote:
> The MMU mapping flow has been optimized to improve performance; the
> unmapping flow can be optimized to follow the same pattern.
>
> This patch changes ipu6_mmu_unmap() into a void function and also
> removes ipu6_mmu_pgsize(), as the driver offloads this work to the
> PCI core DMA ops instead of handling it in the driver.
>
> Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
> ---
> v2: squash the ipu6_mmu_pgsize() removal into this patchset
> ---
>  drivers/media/pci/intel/ipu6/ipu6-mmu.c | 125 +++++++++++---------------------
>  drivers/media/pci/intel/ipu6/ipu6-mmu.h |   4 +-
>  2 files changed, 45 insertions(+), 84 deletions(-)

[... quoted diff trimmed; the full patch appears below ...]

> -static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,

This changes the API visible to the rest of the driver. It'd be nice to
have it as a separate patch.

> -			      unsigned long addr_merge, size_t size)
> +void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
> +		    size_t size)

[...]
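For readers without the old code at hand, here is a worked example of what the removed ipu6_mmu_pgsize() computed: the largest supported page size that fits both the remaining size and the alignment of the address. The values below are illustrative, not taken from the driver.

```c
/*
 * Worked example for the removed ipu6_mmu_pgsize() (illustrative values):
 *
 *   pgsize_bitmap = SZ_4K | SZ_2M
 *   addr_merge    = 0x203000     (the iova passed by ipu6_mmu_unmap())
 *   size          = 0x5000       (bytes still to unmap)
 *
 *   pgsize_idx = __fls(0x5000)   = 14   max page that fits 'size'
 *   align_idx  = __ffs(0x203000) = 12   max page the address allows
 *   pgsize_idx = min(14, 12)     = 12
 *   pgsize     = ((1UL << 13) - 1) & (SZ_4K | SZ_2M) = SZ_4K
 *
 * The helper returns SZ_4K, the biggest supported page satisfying both
 * constraints. Per the commit message, this splitting is now left to
 * the PCI core DMA ops, so the unmap path can drop the helper.
 */
```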
```diff
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
index e957ccb4691d..2d9c6fbd5da2 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
@@ -252,44 +252,51 @@ static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
 	return pt;
 }
 
-static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		       phys_addr_t dummy, size_t size)
+static void l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		     phys_addr_t dummy, size_t size)
 {
-	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-	u32 iova_start = iova;
+	unsigned int l2_entries;
 	unsigned int l2_idx;
-	size_t unmapped = 0;
 	unsigned long flags;
+	u32 l1_idx;
 	u32 *l2_pt;
 
-	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-		l1_idx, iova);
-
 	spin_lock_irqsave(&mmu_info->lock, flags);
-	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
-		spin_unlock_irqrestore(&mmu_info->lock, flags);
-		dev_err(mmu_info->dev,
-			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
-			iova, l1_idx);
-		return 0;
-	}
-
-	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
-	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
-		     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-		l2_pt = mmu_info->l2_pts[l1_idx];
+	for (l1_idx = iova >> ISP_L1PT_SHIFT;
+	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
 		dev_dbg(mmu_info->dev,
-			"unmap l2 index %u with pteval 0x%10.10llx\n",
-			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
-		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
+			l1_idx, iova);
 
-		clflush_cache_range((void *)&l2_pt[l2_idx],
-				    sizeof(l2_pt[l2_idx]));
-		unmapped++;
+		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+			dev_err(mmu_info->dev,
+				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
+				iova, l1_idx);
+			continue;
+		}
+		l2_pt = mmu_info->l2_pts[l1_idx];
+
+		l2_entries = 0;
+		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+			dev_dbg(mmu_info->dev,
+				"unmap l2 index %u with pteval 0x%10.10llx\n",
+				l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+			iova += ISP_PAGE_SIZE;
+			size -= ISP_PAGE_SIZE;
+
+			l2_entries++;
+		}
+
+		WARN_ON_ONCE(!l2_entries);
+		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
+				    sizeof(l2_pt[0]) * l2_entries);
 	}
+
+	WARN_ON_ONCE(size);
 	spin_unlock_irqrestore(&mmu_info->lock, flags);
-
-	return unmapped << ISP_PAGE_SHIFT;
 }
 
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
@@ -394,8 +401,8 @@ static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 	return l2_map(mmu_info, iova_start, paddr, size);
 }
 
-static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
-			       unsigned long iova, size_t size)
+static void __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
+			     unsigned long iova, size_t size)
 {
 	return l2_unmap(mmu_info, iova, 0, size);
 }
@@ -637,40 +644,13 @@ phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
 	return phy_addr;
 }
 
-static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
-			      unsigned long addr_merge, size_t size)
+void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		    size_t size)
 {
-	unsigned int pgsize_idx;
-	size_t pgsize;
-
-	/* Max page size that still fits into 'size' */
-	pgsize_idx = __fls(size);
-
-	if (likely(addr_merge)) {
-		/* Max page size allowed by address */
-		unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-	}
-
-	pgsize = (1UL << (pgsize_idx + 1)) - 1;
-	pgsize &= pgsize_bitmap;
-
-	WARN_ON(!pgsize);
-
-	/* pick the biggest page */
-	pgsize_idx = __fls(pgsize);
-	pgsize = 1UL << pgsize_idx;
-
-	return pgsize;
-}
-
-size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		      size_t size)
-{
-	size_t unmapped_page, unmapped = 0;
 	unsigned int min_pagesz;
 
+	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);
+
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
 
@@ -682,29 +662,10 @@ size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
 		dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
 			iova, size, min_pagesz);
-		return -EINVAL;
+		return;
 	}
 
-	/*
-	 * Keep iterating until we either unmap 'size' bytes (or more)
-	 * or we hit an area that isn't mapped.
-	 */
-	while (unmapped < size) {
-		size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
-						iova, size - unmapped);
-
-		unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
-		if (!unmapped_page)
-			break;
-
-		dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
-			iova, unmapped_page);
-
-		iova += unmapped_page;
-		unmapped += unmapped_page;
-	}
-
-	return unmapped;
+	return __ipu6_mmu_unmap(mmu_info, iova, size);
 }
 
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
index 21cdb0f146eb..8e40b4a66d7d 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.h
@@ -66,8 +66,8 @@ int ipu6_mmu_hw_init(struct ipu6_mmu *mmu);
 void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu);
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 		 phys_addr_t paddr, size_t size);
-size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		      size_t size);
+void ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		    size_t size);
 phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
 				  dma_addr_t iova);
 #endif
```
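The performance-relevant change in l2_unmap() above is the batched cache flush: the inner loop counts the PTEs it clears and flushes them with one clflush_cache_range() call per contiguous run, instead of one call per entry. A minimal standalone sketch of that pattern follows; the helper name and parameters are hypothetical, not code from the patch.

```c
#include <asm/cacheflush.h>	/* clflush_cache_range(), x86 */
#include <linux/types.h>

/*
 * Hypothetical helper illustrating the batched-flush pattern in the
 * reworked l2_unmap(): clear a contiguous run of L2 PTEs, then flush
 * the CPU cache once for the whole run.
 */
static void clear_ptes_batched(u32 *pt, unsigned int start,
			       unsigned int count, u32 dummy_pteval)
{
	unsigned int i;

	for (i = start; i < start + count; i++)
		pt[i] = dummy_pteval;	/* point each PTE at the dummy page */

	/* one flush covering 'count' entries instead of 'count' flushes */
	clflush_cache_range(&pt[start], sizeof(pt[0]) * count);
}
```

The patch reports anomalies with dev_err() and WARN_ON_ONCE() instead of a return value, which is what makes the void signature possible.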
The MMU mapping flow has been optimized to improve performance; the
unmapping flow can be optimized to follow the same pattern.

This patch changes ipu6_mmu_unmap() into a void function and also
removes ipu6_mmu_pgsize(), as the driver offloads this work to the
PCI core DMA ops instead of handling it in the driver.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
---
v2: squash the ipu6_mmu_pgsize() removal into this patchset
---
 drivers/media/pci/intel/ipu6/ipu6-mmu.c | 125 +++++++++++---------------------
 drivers/media/pci/intel/ipu6/ipu6-mmu.h |   4 +-
 2 files changed, 45 insertions(+), 84 deletions(-)
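As a hedged sketch of the resulting caller contract (the caller below is hypothetical, not part of this series): since ipu6_mmu_unmap() no longer returns a byte count or an error, callers pass page-aligned, fully mapped ranges and rely on the diagnostics inside the MMU code.

```c
/* Hypothetical caller, for illustration only. */
static void example_release_range(struct ipu6_mmu_info *mmu_info,
				  unsigned long iova, size_t size)
{
	/*
	 * No return value to check after this patch: an unaligned
	 * iova/size pair triggers dev_err(), and any leftover 'size'
	 * triggers WARN_ON_ONCE() inside l2_unmap().
	 */
	ipu6_mmu_unmap(mmu_info, iova, size);
}
```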