
[7/9] mm/madvise: let madvise_{dontneed,free}_single_vma() caller batch tlb flushes

Message ID 20250310172318.653630-8-sj@kernel.org (mailing list archive)
State New
Series mm/madvise: batch tlb flushes for MADV_DONTNEED and MADV_FREE

Commit Message

SeongJae Park March 10, 2025, 5:23 p.m. UTC
Update the madvise_dontneed_single_vma() and madvise_free_single_vma()
functions so that the caller can pass in an mmu_gather object, which is
initialized and finished outside of the functions, for batched tlb
flushes.  Also modify their internal code to support such usage by
skipping the initialization and finishing of the self-allocated
mmu_gather object when a valid mmu_gather object is passed in.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/internal.h |  3 +++
 mm/madvise.c  | 37 +++++++++++++++++++++++++------------
 mm/memory.c   | 16 +++++++++++++---
 3 files changed, 41 insertions(+), 15 deletions(-)
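
The resulting calling convention is roughly as below (a usage sketch
only, not part of this patch; the variables stand for whatever the
caller already has at hand):

struct mmu_gather tlb;

/* batched: the caller owns init/finish of the mmu_gather */
tlb_gather_mmu(&tlb, mm);
madvise_dontneed_single_vma(&tlb, vma, start, end);
madvise_dontneed_single_vma(&tlb, next_vma, next_start, next_end);
tlb_finish_mmu(&tlb);

/* unbatched: pass NULL to keep the old self-managed behavior */
madvise_dontneed_single_vma(NULL, vma, start, end);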

Comments

Lorenzo Stoakes March 11, 2025, 1:07 p.m. UTC | #1
Super super UBER nitty but... pretty sure the subject here should be <= 75
chars right? :P

On Mon, Mar 10, 2025 at 10:23:16AM -0700, SeongJae Park wrote:
> Update the madvise_dontneed_single_vma() and madvise_free_single_vma()
> functions so that the caller can pass in an mmu_gather object, which is
> initialized and finished outside of the functions, for batched tlb
> flushes.  Also modify their internal code to support such usage by
> skipping the initialization and finishing of the self-allocated
> mmu_gather object when a valid mmu_gather object is passed in.
>
> Signed-off-by: SeongJae Park <sj@kernel.org>
> ---
>  mm/internal.h |  3 +++
>  mm/madvise.c  | 37 +++++++++++++++++++++++++------------
>  mm/memory.c   | 16 +++++++++++++---
>  3 files changed, 41 insertions(+), 15 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 0caa64dc2cb7..ce7fb2383f65 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -438,6 +438,9 @@ void unmap_page_range(struct mmu_gather *tlb,
>  			     struct vm_area_struct *vma,
>  			     unsigned long addr, unsigned long end,
>  			     struct zap_details *details);
> +void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
> +		      unsigned long addr, unsigned long size,
> +		      struct zap_details *details);
>  int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
>  			   gfp_t gfp);
>
> diff --git a/mm/madvise.c b/mm/madvise.c
> index ba2a78795207..d7ea71c6422c 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -794,12 +794,19 @@ static const struct mm_walk_ops madvise_free_walk_ops = {
>  	.walk_lock		= PGWALK_RDLOCK,
>  };
>
> -static int madvise_free_single_vma(struct vm_area_struct *vma,
> -			unsigned long start_addr, unsigned long end_addr)
> +static int madvise_free_single_vma(
> +		struct mmu_gather *caller_tlb, struct vm_area_struct *vma,

I find this interface horrible, and super confusing. It's not clear at all
what's going on here.

Why not use your new helper struct to add a field you can thread through
here?

> +		unsigned long start_addr, unsigned long end_addr)
>  {
>  	struct mm_struct *mm = vma->vm_mm;
>  	struct mmu_notifier_range range;
> -	struct mmu_gather tlb;
> +	struct mmu_gather self_tlb;
> +	struct mmu_gather *tlb;
> +
> +	if (caller_tlb)
> +		tlb = caller_tlb;
> +	else
> +		tlb = &self_tlb;
>
>  	/* MADV_FREE works for only anon vma at the moment */
>  	if (!vma_is_anonymous(vma))
> @@ -815,16 +822,18 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
>  				range.start, range.end);
>
>  	lru_add_drain();
> -	tlb_gather_mmu(&tlb, mm);
> +	if (!caller_tlb)
> +		tlb_gather_mmu(tlb, mm);

Yeah really don't like this.

Ideally we'd abstract the mmu_gather struct into the helper struct (which I
see you do in a subsequent patch anyway) - it would be great if you could
find a way to make that work here.

But if not, then:

if (behavior->batched_tlb)
	tlb_gather_mmu(&tlb, mm);

etc. etc.

Would work better.
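
Something like the below, very roughly (the struct/field names are only
placeholders for whatever the helper struct ends up being called):

struct madvise_behavior {
	int behavior;
	struct mmu_gather *tlb;	/* set up/finished by the caller when batching */
};

static int madvise_free_single_vma(struct madvise_behavior *mb,
		struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr)
{
	struct mmu_gather *tlb = mb->tlb;

	/* ... same body as now, just always using the threaded tlb ... */
}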

>  	update_hiwater_rss(mm);
>
>  	mmu_notifier_invalidate_range_start(&range);
> -	tlb_start_vma(&tlb, vma);
> +	tlb_start_vma(tlb, vma);

Also not a fan of making tlb refer to a pointer now when before it
didn't... I mean that's more of a nit and maybe unavoidable, but still!

I mean yeah ok this is probably unavoidable, ignore.

>  	walk_page_range(vma->vm_mm, range.start, range.end,
> -			&madvise_free_walk_ops, &tlb);
> -	tlb_end_vma(&tlb, vma);
> +			&madvise_free_walk_ops, tlb);
> +	tlb_end_vma(tlb, vma);
>  	mmu_notifier_invalidate_range_end(&range);
> -	tlb_finish_mmu(&tlb);
> +	if (!caller_tlb)
> +		tlb_finish_mmu(tlb);
>
>  	return 0;
>  }
> @@ -848,7 +857,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
>   * An interface that causes the system to free clean pages and flush
>   * dirty pages is already available as msync(MS_INVALIDATE).
>   */
> -static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
> +static long madvise_dontneed_single_vma(struct mmu_gather *tlb,
> +					struct vm_area_struct *vma,
>  					unsigned long start, unsigned long end)
>  {
>  	struct zap_details details = {
> @@ -856,7 +866,10 @@ static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
>  		.even_cows = true,
>  	};
>
> -	zap_page_range_single(vma, start, end - start, &details);
> +	if (!tlb)
> +		zap_page_range_single(vma, start, end - start, &details);

Please don't put the negation case first, it's confusing. Swap them!
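
i.e.:

if (tlb)
	unmap_vma_single(tlb, vma, start, end - start, &details);
else
	zap_page_range_single(vma, start, end - start, &details);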


> +	else
> +		unmap_vma_single(tlb, vma, start, end - start, &details);
>  	return 0;
>  }
>
> @@ -951,9 +964,9 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
>  	}
>
>  	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
> -		return madvise_dontneed_single_vma(vma, start, end);
> +		return madvise_dontneed_single_vma(NULL, vma, start, end);
>  	else if (behavior == MADV_FREE)
> -		return madvise_free_single_vma(vma, start, end);
> +		return madvise_free_single_vma(NULL, vma, start, end);

Not to labour the point, but this is also horrid, passing a mystery NULL
parameter first...

>  	else
>  		return -EINVAL;
>  }
> diff --git a/mm/memory.c b/mm/memory.c
> index 88c478e2ed1a..3256b9713cbd 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1995,9 +1995,19 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
>  	mmu_notifier_invalidate_range_end(&range);
>  }
>
> -static void unmap_vma_single(struct mmu_gather *tlb,
> -		struct vm_area_struct *vma, unsigned long address,
> -		unsigned long size, struct zap_details *details)
> +/**
> + * unmap_vma_single - remove user pages in a given range
> + * @tlb: pointer to the caller's struct mmu_gather
> + * @vma: vm_area_struct holding the applicable pages
> + * @address: starting address of the pages
> + * @size: number of bytes to remove
> + * @details: details of shared cache invalidation
> + *
> + * @tlb shouldn't be NULL.  The range must fit into one VMA.

Can we add some VM_WARN_ON[_ONCE]()'s for these conditions please?

Thanks for documenting!
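
Something like this at the top of the function, perhaps:

VM_WARN_ON_ONCE(!tlb);
VM_WARN_ON_ONCE(address < vma->vm_start);
VM_WARN_ON_ONCE(address + size > vma->vm_end);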

> + */
> +void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
> +		      unsigned long address, unsigned long size,
> +		      struct zap_details *details)
>  {
>  	const unsigned long end = address + size;
>  	struct mmu_notifier_range range;
> --
> 2.39.5

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 0caa64dc2cb7..ce7fb2383f65 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -438,6 +438,9 @@  void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long size,
+		      struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 			   gfp_t gfp);
 
diff --git a/mm/madvise.c b/mm/madvise.c
index ba2a78795207..d7ea71c6422c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -794,12 +794,19 @@  static const struct mm_walk_ops madvise_free_walk_ops = {
 	.walk_lock		= PGWALK_RDLOCK,
 };
 
-static int madvise_free_single_vma(struct vm_area_struct *vma,
-			unsigned long start_addr, unsigned long end_addr)
+static int madvise_free_single_vma(
+		struct mmu_gather *caller_tlb, struct vm_area_struct *vma,
+		unsigned long start_addr, unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_notifier_range range;
-	struct mmu_gather tlb;
+	struct mmu_gather self_tlb;
+	struct mmu_gather *tlb;
+
+	if (caller_tlb)
+		tlb = caller_tlb;
+	else
+		tlb = &self_tlb;
 
 	/* MADV_FREE works for only anon vma at the moment */
 	if (!vma_is_anonymous(vma))
@@ -815,16 +822,18 @@  static int madvise_free_single_vma(struct vm_area_struct *vma,
 				range.start, range.end);
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm);
+	if (!caller_tlb)
+		tlb_gather_mmu(tlb, mm);
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
-	tlb_start_vma(&tlb, vma);
+	tlb_start_vma(tlb, vma);
 	walk_page_range(vma->vm_mm, range.start, range.end,
-			&madvise_free_walk_ops, &tlb);
-	tlb_end_vma(&tlb, vma);
+			&madvise_free_walk_ops, tlb);
+	tlb_end_vma(tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb);
+	if (!caller_tlb)
+		tlb_finish_mmu(tlb);
 
 	return 0;
 }
@@ -848,7 +857,8 @@  static int madvise_free_single_vma(struct vm_area_struct *vma,
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct mmu_gather *tlb,
+					struct vm_area_struct *vma,
 					unsigned long start, unsigned long end)
 {
 	struct zap_details details = {
@@ -856,7 +866,10 @@  static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 		.even_cows = true,
 	};
 
-	zap_page_range_single(vma, start, end - start, &details);
+	if (!tlb)
+		zap_page_range_single(vma, start, end - start, &details);
+	else
+		unmap_vma_single(tlb, vma, start, end - start, &details);
 	return 0;
 }
 
@@ -951,9 +964,9 @@  static long madvise_dontneed_free(struct vm_area_struct *vma,
 	}
 
 	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
-		return madvise_dontneed_single_vma(vma, start, end);
+		return madvise_dontneed_single_vma(NULL, vma, start, end);
 	else if (behavior == MADV_FREE)
-		return madvise_free_single_vma(vma, start, end);
+		return madvise_free_single_vma(NULL, vma, start, end);
 	else
 		return -EINVAL;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 88c478e2ed1a..3256b9713cbd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1995,9 +1995,19 @@  void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 	mmu_notifier_invalidate_range_end(&range);
 }
 
-static void unmap_vma_single(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned long size, struct zap_details *details)
+/**
+ * unmap_vma_single - remove user pages in a given range
+ * @tlb: pointer to the caller's struct mmu_gather
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of the pages
+ * @size: number of bytes to remove
+ * @details: details of shared cache invalidation
+ *
+ * @tlb shouldn't be NULL.  The range must fit into one VMA.
+ */
+void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		      unsigned long address, unsigned long size,
+		      struct zap_details *details)
 {
 	const unsigned long end = address + size;
 	struct mmu_notifier_range range;