
[v4,5/6] alloc_tag: introduce pgtag_ref_handle to abstract page tag references

Message ID 20241023170759.999909-6-surenb@google.com (mailing list archive)
State Handled Elsewhere
Series page allocation tag compression

Checks

Context: mcgrof/vmtest-main-PR | Check: fail | Description: merge-conflict

Commit Message

Suren Baghdasaryan Oct. 23, 2024, 5:07 p.m. UTC
To simplify later changes to page tag references, introduce new
pgtag_ref_handle type. This allows easy replacement of page_ext
as a storage of page allocation tags.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm.h          | 25 +++++-----
 include/linux/pgalloc_tag.h | 92 ++++++++++++++++++++++---------------
 2 files changed, 67 insertions(+), 50 deletions(-)
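
Concretely, the pattern every caller switches to looks roughly like this (a minimal sketch distilled from the hunks below; pgalloc_tag_example() is a hypothetical caller, not part of the patch):

	static inline void pgalloc_tag_example(struct page *page, size_t bytes)
	{
		union pgtag_ref_handle handle;
		union codetag_ref ref;

		/* Copy the stored ref into 'ref' and remember where it lives in 'handle'. */
		if (!get_page_tag_ref(page, &ref, &handle))
			return;

		alloc_tag_sub(&ref, bytes);		/* operate on the local copy */
		update_page_tag_ref(handle, &ref);	/* write the copy back through the handle */
		put_page_tag_ref(handle);		/* drop the underlying page_ext reference */
	}

Callers never dereference the storage directly, so a later patch can change what the handle refers to without touching this pattern.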

Comments

Pasha Tatashin Oct. 23, 2024, 5:35 p.m. UTC | #1
On Wed, Oct 23, 2024 at 1:08 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> To simplify later changes to page tag references, introduce new
> pgtag_ref_handle type. This allows easy replacement of page_ext
> as a storage of page allocation tags.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Andrew Morton Oct. 23, 2024, 9 p.m. UTC | #2
On Wed, 23 Oct 2024 10:07:58 -0700 Suren Baghdasaryan <surenb@google.com> wrote:

> To simplify later changes to page tag references, introduce new
> pgtag_ref_handle type. This allows easy replacement of page_ext
> as a storage of page allocation tags.
> 
> ...
>
>  static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
>  {
> +	union pgtag_ref_handle handle;
> +	union codetag_ref ref;
>  	struct alloc_tag *tag;
> -	union codetag_ref *ref;
>  
>  	tag = pgalloc_tag_get(&old->page);
>  	if (!tag)
>  		return;
>  
> -	ref = get_page_tag_ref(&new->page);
> -	if (!ref)
> +	if (!get_page_tag_ref(&new->page, &ref, &handle))
>  		return;
>  
>  	/* Clear the old ref to the original allocation tag. */
>  	clear_page_tag_ref(&old->page);
>  	/* Decrement the counters of the tag on get_new_folio. */
> -	alloc_tag_sub(ref, folio_nr_pages(new));
> -
> -	__alloc_tag_ref_set(ref, tag);
> -
> -	put_page_tag_ref(ref);
> +	alloc_tag_sub(&ref, folio_nr_pages(new));

mm-stable has folio_size(new) here, fixed up.

I think we already discussed this, but there's a crazy amount of
inlining here.  pgalloc_tag_split() is huge, and has four callsites.
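
In other words, alloc_tag_sub() accounts in bytes (the pgalloc_tag_sub() hunk later in this patch passes PAGE_SIZE * nr), so the amount subtracted for the new folio is its size in bytes rather than its page count; a sketch of the fixed-up line, assuming that signature:

	alloc_tag_sub(&ref, folio_size(new));	/* bytes, not folio_nr_pages(new) */
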
Suren Baghdasaryan Oct. 23, 2024, 9:09 p.m. UTC | #3
On Wed, Oct 23, 2024 at 2:00 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 23 Oct 2024 10:07:58 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > To simplify later changes to page tag references, introduce new
> > pgtag_ref_handle type. This allows easy replacement of page_ext
> > as a storage of page allocation tags.
> >
> > ...
> >
> >  static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
> >  {
> > +     union pgtag_ref_handle handle;
> > +     union codetag_ref ref;
> >       struct alloc_tag *tag;
> > -     union codetag_ref *ref;
> >
> >       tag = pgalloc_tag_get(&old->page);
> >       if (!tag)
> >               return;
> >
> > -     ref = get_page_tag_ref(&new->page);
> > -     if (!ref)
> > +     if (!get_page_tag_ref(&new->page, &ref, &handle))
> >               return;
> >
> >       /* Clear the old ref to the original allocation tag. */
> >       clear_page_tag_ref(&old->page);
> >       /* Decrement the counters of the tag on get_new_folio. */
> > -     alloc_tag_sub(ref, folio_nr_pages(new));
> > -
> > -     __alloc_tag_ref_set(ref, tag);
> > -
> > -     put_page_tag_ref(ref);
> > +     alloc_tag_sub(&ref, folio_nr_pages(new));
>
> mm-stable has folio_size(new) here, fixed up.

Oh, right. You merged that patch tonight and I formatted my patchset
yesterday :)
Thanks for the fixup.

>
> I think we already discussed this, but there's a crazy amount of
> inlining here.  pgalloc_tag_split() is huge, and has four callsites.

I must have missed that discussion but I am happy to uninline this
function. I think splitting is a heavy enough operation that this
uninlining would not be noticeable.
Thanks!
Suren Baghdasaryan Oct. 24, 2024, 4:25 p.m. UTC | #4
On Wed, Oct 23, 2024 at 2:09 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Wed, Oct 23, 2024 at 2:00 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> > On Wed, 23 Oct 2024 10:07:58 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > > To simplify later changes to page tag references, introduce new
> > > pgtag_ref_handle type. This allows easy replacement of page_ext
> > > as a storage of page allocation tags.
> > >
> > > ...
> > >
> > >  static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
> > >  {
> > > +     union pgtag_ref_handle handle;
> > > +     union codetag_ref ref;
> > >       struct alloc_tag *tag;
> > > -     union codetag_ref *ref;
> > >
> > >       tag = pgalloc_tag_get(&old->page);
> > >       if (!tag)
> > >               return;
> > >
> > > -     ref = get_page_tag_ref(&new->page);
> > > -     if (!ref)
> > > +     if (!get_page_tag_ref(&new->page, &ref, &handle))
> > >               return;
> > >
> > >       /* Clear the old ref to the original allocation tag. */
> > >       clear_page_tag_ref(&old->page);
> > >       /* Decrement the counters of the tag on get_new_folio. */
> > > -     alloc_tag_sub(ref, folio_nr_pages(new));
> > > -
> > > -     __alloc_tag_ref_set(ref, tag);
> > > -
> > > -     put_page_tag_ref(ref);
> > > +     alloc_tag_sub(&ref, folio_nr_pages(new));
> >
> > mm-stable has folio_size(new) here, fixed up.
>
> Oh, right. You merged that patch tonight and I formatted my patchset
> yesterday :)
> Thanks for the fixup.
>
> >
> > I think we already discussed this, but there's a crazy amount of
> > inlining here.  pgalloc_tag_split() is huge, and has four callsites.
>
> I must have missed that discussion but I am happy to uninline this
> function. I think splitting is a heavy enough operation that this
> uninlining would not be noticeable.

Posted the requested uninlining at
https://lore.kernel.org/all/20241024162318.1640781-1-surenb@google.com/

> Thanks!
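
The uninlining itself is mechanical: the static inline definition moves out of include/linux/mm.h behind a declaration, roughly as sketched here (illustrative only; the .c file placement is an assumption, and the actual patch is at the lore link above):

	/* include/linux/mm.h: declaration only */
	void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);

	/* e.g. mm/page_alloc.c: out-of-line definition */
	void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
	{
		/* same body as the current static inline version shown in the patch below */
	}

With four callsites, the duplicated inline body is traded for one call per site, which Suren argues is negligible next to the cost of splitting a folio.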

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5cd22303fbc0..8efb4a6a1a70 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4180,37 +4180,38 @@  static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new
 		return;
 
 	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
-		union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
 
-		if (ref) {
+		if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
 			/* Set new reference to point to the original tag */
-			alloc_tag_ref_set(ref, tag);
-			put_page_tag_ref(ref);
+			alloc_tag_ref_set(&ref, tag);
+			update_page_tag_ref(handle, &ref);
+			put_page_tag_ref(handle);
 		}
 	}
 }
 
 static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
 {
+	union pgtag_ref_handle handle;
+	union codetag_ref ref;
 	struct alloc_tag *tag;
-	union codetag_ref *ref;
 
 	tag = pgalloc_tag_get(&old->page);
 	if (!tag)
 		return;
 
-	ref = get_page_tag_ref(&new->page);
-	if (!ref)
+	if (!get_page_tag_ref(&new->page, &ref, &handle))
 		return;
 
 	/* Clear the old ref to the original allocation tag. */
 	clear_page_tag_ref(&old->page);
 	/* Decrement the counters of the tag on get_new_folio. */
-	alloc_tag_sub(ref, folio_nr_pages(new));
-
-	__alloc_tag_ref_set(ref, tag);
-
-	put_page_tag_ref(ref);
+	alloc_tag_sub(&ref, folio_nr_pages(new));
+	__alloc_tag_ref_set(&ref, tag);
+	update_page_tag_ref(handle, &ref);
+	put_page_tag_ref(handle);
 }
 #else /* !CONFIG_MEM_ALLOC_PROFILING */
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 59a3deb792a8..b13cd3313a88 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -11,46 +11,59 @@ 
 
 #include <linux/page_ext.h>
 
+union pgtag_ref_handle {
+	union codetag_ref *ref;	/* reference in page extension */
+};
+
 extern struct page_ext_operations page_alloc_tagging_ops;
 
-static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext)
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
+				    union pgtag_ref_handle *handle)
 {
-	return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
-}
+	struct page_ext *page_ext;
+	union codetag_ref *tmp;
 
-static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref)
-{
-	return (void *)ref - page_alloc_tagging_ops.offset;
+	if (!page)
+		return false;
+
+	page_ext = page_ext_get(page);
+	if (!page_ext)
+		return false;
+
+	tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
+	ref->ct = tmp->ct;
+	handle->ref = tmp;
+	return true;
 }
 
-/* Should be called only if mem_alloc_profiling_enabled() */
-static inline union codetag_ref *get_page_tag_ref(struct page *page)
+static inline void put_page_tag_ref(union pgtag_ref_handle handle)
 {
-	if (page) {
-		struct page_ext *page_ext = page_ext_get(page);
+	if (WARN_ON(!handle.ref))
+		return;
 
-		if (page_ext)
-			return codetag_ref_from_page_ext(page_ext);
-	}
-	return NULL;
+	page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
 }
 
-static inline void put_page_tag_ref(union codetag_ref *ref)
+static inline void update_page_tag_ref(union pgtag_ref_handle handle,
+				       union codetag_ref *ref)
 {
-	if (WARN_ON(!ref))
+	if (WARN_ON(!handle.ref || !ref))
 		return;
 
-	page_ext_put(page_ext_from_codetag_ref(ref));
+	handle.ref->ct = ref->ct;
 }
 
 static inline void clear_page_tag_ref(struct page *page)
 {
 	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
 
-		if (ref) {
-			set_codetag_empty(ref);
-			put_page_tag_ref(ref);
+		if (get_page_tag_ref(page, &ref, &handle)) {
+			set_codetag_empty(&ref);
+			update_page_tag_ref(handle, &ref);
+			put_page_tag_ref(handle);
 		}
 	}
 }
@@ -59,11 +72,13 @@  static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int nr)
 {
 	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
 
-		if (ref) {
-			alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr);
-			put_page_tag_ref(ref);
+		if (get_page_tag_ref(page, &ref, &handle)) {
+			alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
+			update_page_tag_ref(handle, &ref);
+			put_page_tag_ref(handle);
 		}
 	}
 }
@@ -71,11 +86,13 @@  static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
 {
 	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
 
-		if (ref) {
-			alloc_tag_sub(ref, PAGE_SIZE * nr);
-			put_page_tag_ref(ref);
+		if (get_page_tag_ref(page, &ref, &handle)) {
+			alloc_tag_sub(&ref, PAGE_SIZE * nr);
+			update_page_tag_ref(handle, &ref);
+			put_page_tag_ref(handle);
 		}
 	}
 }
@@ -85,13 +102,14 @@  static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
 	struct alloc_tag *tag = NULL;
 
 	if (mem_alloc_profiling_enabled()) {
-		union codetag_ref *ref = get_page_tag_ref(page);
-
-		alloc_tag_sub_check(ref);
-		if (ref) {
-			if (ref->ct)
-				tag = ct_to_alloc_tag(ref->ct);
-			put_page_tag_ref(ref);
+		union pgtag_ref_handle handle;
+		union codetag_ref ref;
+
+		if (get_page_tag_ref(page, &ref, &handle)) {
+			alloc_tag_sub_check(&ref);
+			if (ref.ct)
+				tag = ct_to_alloc_tag(ref.ct);
+			put_page_tag_ref(handle);
 		}
 	}
 
@@ -106,8 +124,6 @@  static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 
 #else /* CONFIG_MEM_ALLOC_PROFILING */
 
-static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; }
-static inline void put_page_tag_ref(union codetag_ref *ref) {}
 static inline void clear_page_tag_ref(struct page *page) {}
 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
 				   unsigned int nr) {}