[v1,2/6] mm/rmap: move SetPageAnonExclusive out of __page_set_anon_rmap()

Message ID: 20230913125113.313322-3-david@redhat.com
State: New
Series: Anon rmap cleanups

Commit Message

David Hildenbrand Sept. 13, 2023, 12:51 p.m. UTC
Let's handle it in the caller. No need to pass the page. While at it,
rename the function to __folio_set_anon() and pass "bool exclusive" instead
of "int exclusive".

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/rmap.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

Comments

Matthew Wilcox Sept. 13, 2023, 2:32 p.m. UTC | #1
On Wed, Sep 13, 2023 at 02:51:09PM +0200, David Hildenbrand wrote:
> @@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>  
>  	if (likely(!folio_test_ksm(folio))) {
>  		if (first)
> -			__page_set_anon_rmap(folio, page, vma, address,
> -					     !!(flags & RMAP_EXCLUSIVE));
> +			__folio_set_anon(folio, vma, address,
> +					 !!(flags & RMAP_EXCLUSIVE));
>  		else
>  			__page_check_anon_rmap(folio, page, vma, address);
>  	}
> +	if (flags & RMAP_EXCLUSIVE)
> +		SetPageAnonExclusive(page);

Won't we end up setting AnonExclusive on ksm pages, or do we make sure
to never pass RMAP_EXCLUSIVE for ksm pages?

Maybe better to move these last two lines inside the previous test,
just to avoid the question.
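
That is, roughly:

	if (likely(!folio_test_ksm(folio))) {
		if (first)
			__folio_set_anon(folio, vma, address,
					 !!(flags & RMAP_EXCLUSIVE));
		else
			__page_check_anon_rmap(folio, page, vma, address);
		if (flags & RMAP_EXCLUSIVE)
			SetPageAnonExclusive(page);
	}

so ksm folios can never see the flag at all.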
David Hildenbrand Sept. 13, 2023, 2:34 p.m. UTC | #2
On 13.09.23 16:32, Matthew Wilcox wrote:
> On Wed, Sep 13, 2023 at 02:51:09PM +0200, David Hildenbrand wrote:
>> @@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>>   
>>   	if (likely(!folio_test_ksm(folio))) {
>>   		if (first)
>> -			__page_set_anon_rmap(folio, page, vma, address,
>> -					     !!(flags & RMAP_EXCLUSIVE));
>> +			__folio_set_anon(folio, vma, address,
>> +					 !!(flags & RMAP_EXCLUSIVE));
>>   		else
>>   			__page_check_anon_rmap(folio, page, vma, address);
>>   	}
>> +	if (flags & RMAP_EXCLUSIVE)
>> +		SetPageAnonExclusive(page);
> 
> Won't we end up setting AnonExclusive on ksm pages, or do we make sure
> to never pass RMAP_EXCLUSIVE for ksm pages?

Only if there is a bug and someone passes RMAP_EXCLUSIVE for these. :)

Fortunately, we do have

VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);

in SetPageAnonExclusive() to catch such bugs.
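
For reference, the helper looks roughly like this (see
include/linux/page-flags.h for the exact definition):

	/* sketch of SetPageAnonExclusive(); exact body may differ */
	static __always_inline void SetPageAnonExclusive(struct page *page)
	{
		VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
		VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
		set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
	}

so a bogus RMAP_EXCLUSIVE for a ksm page trips the assertion instead of
silently setting the bit.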

> 
> Maybe better to move these last two lines inside the previous test,
> just to avoid the question.

That could end up hiding another BUG, so I'd rather let 
SetPageAnonExclusive() catch it.
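
E.g., for a (hypothetical) buggy caller such as

	/* ksm_page is a ksm page; passing RMAP_EXCLUSIVE for it is a bug */
	page_add_anon_rmap(ksm_page, vma, address, RMAP_EXCLUSIVE);

keeping the SetPageAnonExclusive() call outside of the
folio_test_ksm() check means the VM_BUG_ON_PGFLAGS() fires and the bug
gets reported; moving it inside would make us silently ignore the bogus
flag instead.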

Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index ca2787c1fe05..ab16baa0fcb3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1122,27 +1122,25 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 }
 
 /**
- * __page_set_anon_rmap - set up new anonymous rmap
- * @folio:	Folio which contains page.
- * @page:	Page to add to rmap.
- * @vma:	VM area to add page to.
+ * __folio_set_anon - set up a new anonymous rmap for a folio
+ * @folio:	The folio to set up the new anonymous rmap for.
+ * @vma:	VM area to add the folio to.
  * @address:	User virtual address of the mapping
- * @exclusive:	the page is exclusively owned by the current process
+ * @exclusive:	Whether the folio is exclusive to the process.
  */
-static void __page_set_anon_rmap(struct folio *folio, struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
+static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
+			     unsigned long address, bool exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
 	if (folio_test_anon(folio))
-		goto out;
+		return;
 
 	/*
-	 * If the page isn't exclusively mapped into this vma,
-	 * we must use the _oldest_ possible anon_vma for the
-	 * page mapping!
+	 * If the folio isn't exclusive to this vma, we must use the _oldest_
+	 * possible anon_vma for the folio mapping!
 	 */
 	if (!exclusive)
 		anon_vma = anon_vma->root;
@@ -1156,9 +1154,6 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
 	folio->index = linear_page_index(vma, address);
-out:
-	if (exclusive)
-		SetPageAnonExclusive(page);
 }
 
 /**
@@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 
 	if (likely(!folio_test_ksm(folio))) {
 		if (first)
-			__page_set_anon_rmap(folio, page, vma, address,
-					     !!(flags & RMAP_EXCLUSIVE));
+			__folio_set_anon(folio, vma, address,
+					 !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(folio, page, vma, address);
 	}
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 
 	mlock_vma_folio(folio, vma, compound);
 }
@@ -1289,7 +1286,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 
 /**
@@ -2539,8 +2537,10 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(folio, page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
+		__folio_set_anon(folio, vma, address,
+				 !!(flags & RMAP_EXCLUSIVE));
+	if (flags & RMAP_EXCLUSIVE)
+		SetPageAnonExclusive(page);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,
@@ -2550,6 +2550,7 @@ void hugepage_add_new_anon_rmap(struct folio *folio,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	__folio_set_anon(folio, vma, address, true);
+	SetPageAnonExclusive(&folio->page);
 }
 #endif /* CONFIG_HUGETLB_PAGE */