
[v2,3/6] mm: page_vma_mapped: support checking if a pfn is mapped into a vma

Message ID 20220202143307.96282-4-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series: Fix some bugs related to rmap and dax

Commit Message

Muchun Song Feb. 2, 2022, 2:33 p.m. UTC
page_vma_mapped_walk() is supposed to check if a page is mapped into a vma.
However, not all page frames (e.g. PFN_DEV) have an associated struct page.
Anyone who wants to check whether a pfn (without a struct page) is mapped
into a vma would have to duplicate much of this function. So add support
for checking if a pfn is mapped into a vma. The next patch will use this
new feature for dax.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/rmap.h    | 14 ++++++++--
 include/linux/swapops.h | 13 +++++++---
 mm/internal.h           | 28 +++++++++++++-------
 mm/page_vma_mapped.c    | 68 +++++++++++++++++++++++++++++++------------------
 4 files changed, 83 insertions(+), 40 deletions(-)

Comments

Matthew Wilcox Feb. 2, 2022, 6:28 p.m. UTC | #1
On Wed, Feb 02, 2022 at 10:33:04PM +0800, Muchun Song wrote:
> page_vma_mapped_walk() is supposed to check if a page is mapped into a vma.
> However, not all page frames (e.g. PFN_DEV) have an associated struct page.
> Anyone who wants to check whether a pfn (without a struct page) is mapped
> into a vma would have to duplicate much of this function. So add support
> for checking if a pfn is mapped into a vma. The next patch will use this
> new feature for dax.

I'm coming to more or less the same solution for fixing the bug in
page_mapped_in_vma().  If you call it with a head page, it will look
for any page in the THP instead of the precise page.  I think we can do
a fairly significant simplification though, so I'm going to go off
and work on that next ...
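
For illustration, the precise-page check described above could be expressed
with the new pfn interface by walking a single pfn. This is a hypothetical
sketch, not the simplification referred to above:

	/*
	 * Hypothetical sketch: check whether one precise page (one pfn) is
	 * mapped at @address in @vma, without matching every subpage of a
	 * compound page.
	 */
	static bool example_page_precisely_mapped(struct page *page,
						  struct vm_area_struct *vma,
						  unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			.pfn		= page_to_pfn(page),
			.nr		= 1,
			.index		= page_to_pgoff(page),
			.vma		= vma,
			.address	= address,
			.flags		= PVMW_PFN_WALK,
		};

		if (!page_vma_mapped_walk(&pvmw))
			return false;
		/* Drop the ptl and unmap the pte taken by the walk. */
		page_vma_mapped_walk_done(&pvmw);
		return true;
	}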

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 221c3c6438a7..78373935ad49 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -204,9 +204,18 @@  int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
 #define PVMW_SYNC		(1 << 0)
 /* Look for migarion entries rather than present PTEs */
 #define PVMW_MIGRATION		(1 << 1)
+/* Walk the page table by checking the pfn instead of a struct page */
+#define PVMW_PFN_WALK		(1 << 2)
 
 struct page_vma_mapped_walk {
-	struct page *page;
+	union {
+		struct page *page;
+		struct {
+			unsigned long pfn;
+			unsigned int nr;
+			pgoff_t index;
+		};
+	};
 	struct vm_area_struct *vma;
 	unsigned long address;
 	pmd_t *pmd;
@@ -218,7 +227,8 @@  struct page_vma_mapped_walk {
 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
 {
 	/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
-	if (pvmw->pte && !PageHuge(pvmw->page))
+	if (pvmw->pte && (pvmw->flags & PVMW_PFN_WALK ||
+			  !PageHuge(pvmw->page)))
 		pte_unmap(pvmw->pte);
 	if (pvmw->ptl)
 		spin_unlock(pvmw->ptl);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d356ab4047f7..d28bf65fd6a5 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -247,17 +247,22 @@  static inline int is_writable_migration_entry(swp_entry_t entry)
 
 #endif
 
-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+static inline unsigned long pfn_swap_entry_to_pfn(swp_entry_t entry)
 {
-	struct page *p = pfn_to_page(swp_offset(entry));
+	unsigned long pfn = swp_offset(entry);
 
 	/*
 	 * Any use of migration entries may only occur while the
 	 * corresponding page is locked
 	 */
-	BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+	BUG_ON(is_migration_entry(entry) && !PageLocked(pfn_to_page(pfn)));
+
+	return pfn;
+}
 
-	return p;
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+	return pfn_to_page(pfn_swap_entry_to_pfn(entry));
 }
 
 /*
diff --git a/mm/internal.h b/mm/internal.h
index deb9bda18e59..5458cd08df33 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -478,25 +478,35 @@  vma_address(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * Then at what user virtual address will none of the page be found in vma?
- * Assumes that vma_address() already returned a good starting address.
- * If page is a compound head, the entire compound page is considered.
+ * Return the end of user virtual address at the specific offset within
+ * a vma.
  */
 static inline unsigned long
-vma_address_end(struct page *page, struct vm_area_struct *vma)
+vma_pgoff_address_end(pgoff_t pgoff, unsigned long nr_pages,
+		      struct vm_area_struct *vma)
 {
-	pgoff_t pgoff;
-	unsigned long address;
+	unsigned long address = vma->vm_start;
 
-	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
-	pgoff = page_to_pgoff(page) + compound_nr(page);
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	address += (pgoff + nr_pages - vma->vm_pgoff) << PAGE_SHIFT;
 	/* Check for address beyond vma (or wrapped through 0?) */
 	if (address < vma->vm_start || address > vma->vm_end)
 		address = vma->vm_end;
 	return address;
 }
 
+/*
+ * Return the end of user virtual address of a page within a vma. Assumes that
+ * vma_address() already returned a good starting address. If page is a compound
+ * head, the entire compound page is considered.
+ */
+static inline unsigned long
+vma_address_end(struct page *page, struct vm_area_struct *vma)
+{
+	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	return vma_pgoff_address_end(page_to_pgoff(page), compound_nr(page),
+				     vma);
+}
+
 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 						    struct file *fpin)
 {
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index f7b331081791..bd172268084f 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -53,10 +53,17 @@  static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	return true;
 }
 
-static inline bool pfn_is_match(struct page *page, unsigned long pfn)
+static inline bool pfn_is_match(struct page_vma_mapped_walk *pvmw,
+				unsigned long pfn)
 {
-	unsigned long page_pfn = page_to_pfn(page);
+	struct page *page;
+	unsigned long page_pfn;
 
+	if (pvmw->flags & PVMW_PFN_WALK)
+		return pfn >= pvmw->pfn && pfn - pvmw->pfn < pvmw->nr;
+
+	page = pvmw->page;
+	page_pfn = page_to_pfn(page);
 	/* normal page and hugetlbfs page */
 	if (!PageTransCompound(page) || PageHuge(page))
 		return page_pfn == pfn;
@@ -116,7 +123,7 @@  static bool check_pte(struct page_vma_mapped_walk *pvmw)
 		pfn = pte_pfn(*pvmw->pte);
 	}
 
-	return pfn_is_match(pvmw->page, pfn);
+	return pfn_is_match(pvmw, pfn);
 }
 
 static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
@@ -127,24 +134,24 @@  static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
 }
 
 /**
- * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
- * @pvmw->address
- * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
- * must be set. pmd, pte and ptl must be NULL.
+ * page_vma_mapped_walk - check if @pvmw->page or @pvmw->pfn is mapped in
+ * @pvmw->vma at @pvmw->address
+ * @pvmw: pointer to struct page_vma_mapped_walk. page (or pfn and nr and
+ * index), vma, address and flags must be set. pmd, pte and ptl must be NULL.
  *
- * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
- * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
- * adjusted if needed (for PTE-mapped THPs).
+ * Returns true if the page or pfn is mapped in the vma. @pvmw->pmd and
+ * @pvmw->pte point to relevant page table entries. @pvmw->ptl is locked.
+ * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
  *
  * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
- * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
- * a loop to find all PTEs that map the THP.
+ * (usually THP or Huge DEVMAP). For PMD-mapped page, you should run
+ * page_vma_mapped_walk() in a loop to find all PTEs that map the huge page.
  *
  * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
  * regardless of which page table level the page is mapped at. @pvmw->pmd is
  * NULL.
  *
- * Returns false if there are no more page table entries for the page in
+ * Returns false if there are no more page table entries for the page or pfn in
  * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
  *
  * If you need to stop the walk before page_vma_mapped_walk() returned false,
@@ -153,8 +160,9 @@  static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 {
 	struct mm_struct *mm = pvmw->vma->vm_mm;
-	struct page *page = pvmw->page;
+	struct page *page = NULL;
 	unsigned long end;
+	unsigned long pfn;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -164,7 +172,11 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	if (unlikely(PageHuge(page))) {
+	if (!(pvmw->flags & PVMW_PFN_WALK))
+		page = pvmw->page;
+	pfn = page ? page_to_pfn(page) : pvmw->pfn;
+
+	if (unlikely(page && PageHuge(page))) {
 		/* The only possible mapping was handled on last iteration */
 		if (pvmw->pte)
 			return not_found(pvmw);
@@ -187,9 +199,13 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	 * any PageKsm page: whose page->index misleads vma_address()
 	 * and vma_address_end() to disaster.
 	 */
-	end = PageTransCompound(page) ?
-		vma_address_end(page, pvmw->vma) :
-		pvmw->address + PAGE_SIZE;
+	if (page)
+		end = PageTransCompound(page) ?
+		      vma_address_end(page, pvmw->vma) :
+		      pvmw->address + PAGE_SIZE;
+	else
+		end = vma_pgoff_address_end(pvmw->index, pvmw->nr, pvmw->vma);
+
 	if (pvmw->pte)
 		goto next_pte;
 restart:
@@ -217,14 +233,14 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		 * subsequent update.
 		 */
 		pmde = READ_ONCE(*pvmw->pmd);
-
-		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		if (pmd_leaf(pmde) || is_pmd_migration_entry(pmde)) {
 			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
 			pmde = *pvmw->pmd;
-			if (likely(pmd_trans_huge(pmde))) {
+			if (likely(pmd_leaf(pmde))) {
 				if (pvmw->flags & PVMW_MIGRATION)
 					return not_found(pvmw);
-				if (pmd_page(pmde) != page)
+				if (pmd_pfn(pmde) != pfn)
 					return not_found(pvmw);
 				return true;
 			}
@@ -236,20 +252,22 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 					return not_found(pvmw);
 				entry = pmd_to_swp_entry(pmde);
 				if (!is_migration_entry(entry) ||
-				    pfn_swap_entry_to_page(entry) != page)
+				    pfn_swap_entry_to_pfn(entry) != pfn)
 					return not_found(pvmw);
 				return true;
 			}
 			/* THP pmd was split under us: handle on pte level */
 			spin_unlock(pvmw->ptl);
 			pvmw->ptl = NULL;
-		} else if (!pmd_present(pmde)) {
+		} else
+#endif
+		if (!pmd_present(pmde)) {
 			/*
 			 * If PVMW_SYNC, take and drop THP pmd lock so that we
 			 * cannot return prematurely, while zap_huge_pmd() has
 			 * cleared *pmd but not decremented compound_mapcount().
 			 */
-			if ((pvmw->flags & PVMW_SYNC) &&
+			if ((pvmw->flags & PVMW_SYNC) && page &&
 			    PageTransCompound(page)) {
 				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);