
[33/48] mm: Add unmap_mapping_folio()

Message ID: 20211208042256.1923824-34-willy@infradead.org
State: New
Series: Folios for 5.17

Commit Message

Matthew Wilcox Dec. 8, 2021, 4:22 a.m. UTC
Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
instead.  Also move zap_details from linux/mm.h to mm/internal.h.
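
Condensed from the mm/truncate.c hunks below, the call-site conversion is:

	/* truncate_cleanup_folio() already has the folio in hand */
	-	unmap_mapping_page(&folio->page);
	+	unmap_mapping_folio(folio);

	/* invalidate_inode_pages2_range() still works page-at-a-time */
	-	unmap_mapping_page(page);
	+	unmap_mapping_folio(page_folio(page));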

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/mm.h | 24 ------------------------
 mm/internal.h      | 25 ++++++++++++++++++++++++-
 mm/memory.c        | 27 +++++++++++++--------------
 mm/truncate.c      |  4 ++--
 4 files changed, 39 insertions(+), 41 deletions(-)

Comments

Christoph Hellwig Dec. 23, 2021, 7:36 a.m. UTC | #1
On Wed, Dec 08, 2021 at 04:22:41AM +0000, Matthew Wilcox (Oracle) wrote:
> Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
> instead.  Also move zap_details from linux/mm.h to mm/internal.h.

In fact it could even move to mm/memory.c as no one needs it outside of
that file. __oom_reap_task_mm always passes a NULL zap_details argument
to unmap_page_range.
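
For reference, the caller in question, paraphrased from mm/oom_kill.c of this
era (it is not part of this patch and may differ in detail):

	/* __oom_reap_task_mm(): the unmap_page_range() user outside mm/memory.c */
	unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL);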

Otherwise looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Matthew Wilcox Jan. 2, 2022, 4:11 p.m. UTC | #2
On Thu, Dec 23, 2021 at 08:36:45AM +0100, Christoph Hellwig wrote:
> On Wed, Dec 08, 2021 at 04:22:41AM +0000, Matthew Wilcox (Oracle) wrote:
> > Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
> > instead.  Also move zap_details from linux/mm.h to mm/internal.h.
> 
> In fact it could even move to mm/memory.c as no one needs it outside of
> that file. __oom_reap_task_mm always passes a NULL zap_details argument
> to unmap_page_range.

Umm ... no?

static inline bool
zap_skip_check_mapping(struct zap_details *details, struct page *page)
{
        if (!details || !page)
                return false;

        return details->zap_mapping &&
            (details->zap_mapping != page_rmapping(page));
}

Christoph Hellwig Jan. 3, 2022, 7:53 a.m. UTC | #3
On Sun, Jan 02, 2022 at 04:11:08PM +0000, Matthew Wilcox wrote:
> On Thu, Dec 23, 2021 at 08:36:45AM +0100, Christoph Hellwig wrote:
> > On Wed, Dec 08, 2021 at 04:22:41AM +0000, Matthew Wilcox (Oracle) wrote:
> > > Convert both callers of unmap_mapping_page() to call unmap_mapping_folio()
> > > instead.  Also move zap_details from linux/mm.h to mm/internal.h.
> > 
> > In fact it could even move to mm/memory.c as no one needs it outside of
> > that file. __oom_reap_task_mm always passes a NULL zap_details argument
> > to unmap_page_range.
> 
> Umm ... no?
> 
> static inline bool
> zap_skip_check_mapping(struct zap_details *details, struct page *page)
> {
>         if (!details || !page)
>                 return false;
> 
>         return details->zap_mapping &&
>             (details->zap_mapping != page_rmapping(page));
> }

And now check where zap_skip_check_mapping is actually called...
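
For reference, the only call sites sit in zap_pte_range() in mm/memory.c;
paraphrased from this era's tree (not part of this patch):

	page = vm_normal_page(vma, addr, ptent);
	if (unlikely(zap_skip_check_mapping(details, page)))
		continue;

With no user outside that file, both the helper and struct zap_details could
indeed live in mm/memory.c.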

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 145f045b0ddc..c9cdb26802fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1825,28 +1825,6 @@  static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct address_space *zap_mapping;	/* Check page->mapping if set */
-	struct page *single_page;		/* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mappings when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-	if (!details || !page)
-		return false;
-
-	return details->zap_mapping &&
-	    (details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1892,7 +1870,6 @@  extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1913,7 +1890,6 @@  static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 	BUG();
 	return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
diff --git a/mm/internal.h b/mm/internal.h
index 3b79a5c9427a..3f359f4830da 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -74,6 +74,28 @@  static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+	struct address_space *zap_mapping;	/* Check page->mapping if set */
+	struct folio *single_folio;	/* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if we should skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+	if (!details || !page)
+		return false;
+
+	return details->zap_mapping &&
+	    (details->zap_mapping != page_rmapping(page));
+}
+
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -388,6 +410,7 @@  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +514,8 @@  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 	}
 	return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de811a1dc..a86027026f2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1443,8 +1443,8 @@  static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */
-		} else if (details && details->single_page &&
-			   PageTransCompound(details->single_page) &&
+		} else if (details && details->single_folio &&
+			   folio_test_pmd_mappable(details->single_folio) &&
 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
 			/*
@@ -3332,31 +3332,30 @@  static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
  *
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
  * Typically, for efficiency, the range of nearby pages has already been
  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the folio has been remapped again: and then uses unmap_mapping_folio()
  * to unmap it finally.
  */
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	struct zap_details details = { };
 	pgoff_t	first_index;
 	pgoff_t	last_index;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON(!folio_test_locked(folio));
 
-	first_index = page->index;
-	last_index = page->index + thp_nr_pages(page) - 1;
+	first_index = folio->index;
+	last_index = folio->index + folio_nr_pages(folio) - 1;
 
 	details.zap_mapping = mapping;
-	details.single_page = page;
+	details.single_folio = folio;
 
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
diff --git a/mm/truncate.c b/mm/truncate.c
index ab86b07c1e9c..c98feea75a10 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -180,7 +180,7 @@  void do_invalidatepage(struct page *page, unsigned int offset,
 static void truncate_cleanup_folio(struct folio *folio)
 {
 	if (folio_mapped(folio))
-		unmap_mapping_page(&folio->page);
+		unmap_mapping_folio(folio);
 
 	if (folio_has_private(folio))
 		do_invalidatepage(&folio->page, 0, folio_size(folio));
@@ -670,7 +670,7 @@  int invalidate_inode_pages2_range(struct address_space *mapping,
 			wait_on_page_writeback(page);
 
 			if (page_mapped(page))
-				unmap_mapping_page(page);
+				unmap_mapping_folio(page_folio(page));
 			BUG_ON(page_mapped(page));
 
 			ret2 = do_launder_page(mapping, page);