[v2,3/3] mm: introduce mmap_lock_speculate_{try_begin|retry}

Message ID: 20241121162826.987947-3-surenb@google.com
State: New
Series: [v2,1/3] seqlock: add raw_seqcount_try_begin

Commit Message

Suren Baghdasaryan Nov. 21, 2024, 4:28 p.m. UTC
Add helper functions to speculatively perform operations without
read-locking mmap_lock, expecting that mmap_lock will not be
write-locked and mm is not modified from under us.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
Changes since v1 [1]
- Changed to use new raw_seqcount_try_begin() API, per Peter Zijlstra
- Renamed mmap_lock_speculation_{begin|end} to
mmap_lock_speculate_{try_begin|retry}, per Peter Zijlstra

Note: the return value of mmap_lock_speculate_retry() is the opposite of
what it was in mmap_lock_speculation_end(): true now means the speculation failed.

[1] https://lore.kernel.org/all/20241024205231.1944747-2-surenb@google.com/
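
For illustration (not part of the patch): a minimal sketch of the intended
calling pattern under the semantics described in the note above. Only
mmap_lock_speculate_try_begin()/mmap_lock_speculate_retry() and
mmap_read_lock()/mmap_read_unlock() are real interfaces here; the enclosing
function and the two walk helpers are hypothetical placeholders.

/* Hypothetical caller; do_lockless_walk()/do_locked_walk() are placeholders. */
static void walk_mm_maybe_speculative(struct mm_struct *mm)
{
	unsigned int seq;

	/* Refuses to start if mmap_lock is already write-locked. */
	if (mmap_lock_speculate_try_begin(mm, &seq)) {
		do_lockless_walk(mm);
		/* false: no writer ran in the window, the result stands. */
		if (!mmap_lock_speculate_retry(mm, seq))
			return;
		/* true: speculation failed, fall through to the locked path. */
	}

	mmap_read_lock(mm);
	do_locked_walk(mm);
	mmap_read_unlock(mm);
}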

 include/linux/mmap_lock.h | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

Comments

Peter Zijlstra Nov. 22, 2024, 11:05 a.m. UTC | #1
On Thu, Nov 21, 2024 at 08:28:26AM -0800, Suren Baghdasaryan wrote:
> Add helper functions to speculatively perform operations without
> read-locking mmap_lock, expecting that mmap_lock will not be
> write-locked and mm is not modified from under us.
> 
> Suggested-by: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Thanks for these, you're okay with me taking these through tip/perf/core
for the next cycle along with Andrii's uprobe patch?
Suren Baghdasaryan Nov. 22, 2024, 3:03 p.m. UTC | #2
On Fri, Nov 22, 2024 at 3:06 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Thu, Nov 21, 2024 at 08:28:26AM -0800, Suren Baghdasaryan wrote:
> > Add helper functions to speculatively perform operations without
> > read-locking mmap_lock, expecting that mmap_lock will not be
> > write-locked and mm is not modified from under us.
> >
> > Suggested-by: Peter Zijlstra <peterz@infradead.org>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
>
> Thanks for these, you're okay with me taking these through tip/perf/core
> for the next cycle along with Andrii's uprobe patch?

Yes, I'm fine with that plan. Thank you!
Liam R. Howlett Nov. 22, 2024, 3:15 p.m. UTC | #3
* Suren Baghdasaryan <surenb@google.com> [241121 11:28]:
> Add helper functions to speculatively perform operations without
> read-locking mmap_lock, expecting that mmap_lock will not be
> write-locked and mm is not modified from under us.
> 
> Suggested-by: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>

> ---
> Changes since v1 [1]
> - Changed to use new raw_seqcount_try_begin() API, per Peter Zijlstra
> - Renamed mmap_lock_speculation_{begin|end} to
> mmap_lock_speculate_{try_begin|retry}, per Peter Zijlstra
> 
> Note: the return value of mmap_lock_speculate_retry() is the opposite of
> what it was in mmap_lock_speculation_end(): true now means the speculation failed.
> 
> [1] https://lore.kernel.org/all/20241024205231.1944747-2-surenb@google.com/
> 
>  include/linux/mmap_lock.h | 33 +++++++++++++++++++++++++++++++--
>  1 file changed, 31 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 083b7fa2588e..0b39a0f99a3b 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
>  }
>  
>  #ifdef CONFIG_PER_VMA_LOCK
> +
>  static inline void mm_lock_seqcount_init(struct mm_struct *mm)
>  {
>  	seqcount_init(&mm->mm_lock_seq);
> @@ -86,11 +87,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
>  	do_raw_write_seqcount_end(&mm->mm_lock_seq);
>  }
>  
> -#else
> +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> +	/*
> +	 * Since mmap_lock is a sleeping lock, and waiting for it to become
> +	 * unlocked is more or less equivalent with taking it ourselves, don't
> +	 * bother with the speculative path if mmap_lock is already write-locked
> +	 * and take the slow path, which takes the lock.
> +	 */
> +	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
> +}
> +
> +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
> +{
> +	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);
> +}
> +
> +#else /* CONFIG_PER_VMA_LOCK */
> +
>  static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
>  static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
>  static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
> -#endif
> +
> +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> +	return false;
> +}
> +
> +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
> +{
> +	return true;
> +}
> +
> +#endif /* CONFIG_PER_VMA_LOCK */
>  
>  static inline void mmap_init_lock(struct mm_struct *mm)
>  {
> -- 
> 2.47.0.338.g60cca15819-goog
> 
>

Patch

diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 083b7fa2588e..0b39a0f99a3b 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -71,6 +71,7 @@  static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
 	seqcount_init(&mm->mm_lock_seq);
@@ -86,11 +87,39 @@  static inline void mm_lock_seqcount_end(struct mm_struct *mm)
 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
 }
 
-#else
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	/*
+	 * Since mmap_lock is a sleeping lock, and waiting for it to become
+	 * unlocked is more or less equivalent with taking it ourselves, don't
+	 * bother with the speculative path if mmap_lock is already write-locked
+	 * and take the slow path, which takes the lock.
+	 */
+	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
-#endif
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return true;
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
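
Editor's sketch (not part of the patch): the interleaving the retry check
guards against. It assumes, per the earlier patches in this series, that the
mmap write-lock/unlock path brackets its updates with
mm_lock_seqcount_begin()/end(). With CONFIG_PER_VMA_LOCK disabled, the stubs
above make try_begin constant-false and retry constant-true, so the same
caller code simply always takes the locked path.

/*
 * Assumed interleaving (writer-side seqcount calls come from the earlier
 * patches in this series and are shown only for illustration):
 *
 *   speculative reader                          writer
 *   ------------------                          ------
 *   mmap_lock_speculate_try_begin(mm, &seq)
 *     snapshots mm_lock_seq; returns false
 *     if a writer is already active
 *   ... lockless work on mm ...                 mmap_write_lock(mm)
 *                                                 mm_lock_seqcount_begin(mm)
 *                                               ... modify mm ...
 *                                               mmap_write_unlock(mm)
 *                                                 mm_lock_seqcount_end(mm)
 *   mmap_lock_speculate_retry(mm, seq)
 *     sees mm_lock_seq changed, returns true:
 *     discard the speculative result and take
 *     mmap_lock instead
 */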