diff --git a/include/linux/rmap.h b/include/linux/rmap.h
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -208,7 +208,8 @@ void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-static inline void __page_dup_rmap(struct page *page, bool compound)
+static inline void __page_dup_rmap(struct page *page,
+ struct vm_area_struct *dst_vma, bool compound)
{
struct folio *folio = page_folio(page);
@@ -225,17 +226,19 @@ static inline void __page_dup_rmap(struct page *page, bool compound)
atomic_inc(&folio->_total_mapcount);
}
-static inline void page_dup_file_rmap(struct page *page, bool compound)
+static inline void page_dup_file_rmap(struct page *page,
+ struct vm_area_struct *dst_vma, bool compound)
{
- __page_dup_rmap(page, compound);
+ __page_dup_rmap(page, dst_vma, compound);
}
/**
* page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
* anonymous page
* @page: the page to duplicate the mapping for
+ * @dst_vma: the destination vma
+ * @src_vma: the source vma
* @compound: the page is mapped as compound or as a small page
- * @vma: the source vma
*
* The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq.
*
@@ -247,8 +250,10 @@ static inline void page_dup_file_rmap(struct page *page, bool compound)
*
* Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
*/
-static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
- struct vm_area_struct *vma)
+static inline int page_try_dup_anon_rmap(struct page *page,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ bool compound)
{
VM_BUG_ON_PAGE(!PageAnon(page), page);
@@ -267,7 +272,7 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
* future on write faults.
*/
if (likely(!is_device_private_page(page) &&
- unlikely(page_needs_cow_for_dma(vma, page))))
+ unlikely(page_needs_cow_for_dma(src_vma, page))))
return -EBUSY;
ClearPageAnonExclusive(page);
@@ -276,7 +281,7 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
* the page R/O into both processes.
*/
dup:
- __page_dup_rmap(page, compound);
+ __page_dup_rmap(page, dst_vma, compound);
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1166,7 +1166,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
get_page(src_page);
- if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
+ if (unlikely(page_try_dup_anon_rmap(src_page, dst_vma, src_vma, true))) {
/* Page maybe pinned: split and retry the fault on PTEs. */
put_page(src_page);
pte_free(dst_mm, pgtable);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5401,9 +5401,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
* sleep during the process.
*/
if (!folio_test_anon(pte_folio)) {
- page_dup_file_rmap(&pte_folio->page, true);
+ page_dup_file_rmap(&pte_folio->page, dst_vma,
+ true);
} else if (page_try_dup_anon_rmap(&pte_folio->page,
- true, src_vma)) {
+ dst_vma, src_vma, true)) {
pte_t src_pte_old = entry;
struct folio *new_folio;
@@ -6272,7 +6273,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
if (anon_rmap)
hugepage_add_new_anon_rmap(folio, vma, haddr);
else
- page_dup_file_rmap(&folio->page, true);
+ page_dup_file_rmap(&folio->page, vma, true);
new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
/*
@@ -6723,7 +6724,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out_release_unlock;
if (folio_in_pagecache)
- page_dup_file_rmap(&folio->page, true);
+ page_dup_file_rmap(&folio->page, dst_vma, true);
else
hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -836,7 +836,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
get_page(page);
rss[mm_counter(page)]++;
/* Cannot fail as these pages cannot get pinned. */
- BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+ BUG_ON(page_try_dup_anon_rmap(page, dst_vma, src_vma, false));
/*
* We do not preserve soft-dirty information, because so
@@ -950,7 +950,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
* future.
*/
folio_get(folio);
- if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+ if (unlikely(page_try_dup_anon_rmap(page, dst_vma, src_vma, false))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -959,7 +959,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
rss[MM_ANONPAGES]++;
} else if (page) {
folio_get(folio);
- page_dup_file_rmap(page, false);
+ page_dup_file_rmap(page, dst_vma, false);
rss[mm_counter_file(page)]++;
}
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -252,7 +252,7 @@ static bool remove_migration_pte(struct folio *folio,
hugepage_add_anon_rmap(folio, vma, pvmw.address,
rmap_flags);
else
- page_dup_file_rmap(new, true);
+ page_dup_file_rmap(new, vma, true);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
psize);
} else
We'll need access to the destination MM when modifying the total mapcount
of a partially-mappable folio next. So pass in the destination VMA for
consistency.

While at it, change the parameter order for page_try_dup_anon_rmap() such
that the "bool compound" parameter is last, to match the other rmap
functions.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/rmap.h | 21 +++++++++++++--------
 mm/huge_memory.c     |  2 +-
 mm/hugetlb.c         |  9 +++++----
 mm/memory.c          |  6 +++---
 mm/migrate.c         |  2 +-
 5 files changed, 23 insertions(+), 17 deletions(-)
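
As a quick orientation for callers adapting to the new parameter order, here
is a minimal, illustrative sketch. The wrapper below (dup_one_pte_rmap) is
hypothetical and not part of this patch; only the two rmap calls and their
argument order reflect the change:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical helper for a fork-style copy path (illustration only). */
static int dup_one_pte_rmap(struct page *page, struct folio *folio,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma)
{
	if (folio_test_anon(folio)) {
		/* dst_vma and src_vma now come first; "bool compound" is last. */
		if (unlikely(page_try_dup_anon_rmap(page, dst_vma,
						    src_vma, false)))
			return -EBUSY;	/* page may be pinned: caller must copy */
	} else {
		/* page_dup_file_rmap() likewise gains the destination VMA. */
		page_dup_file_rmap(page, dst_vma, false);
	}
	return 0;
}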