Message ID | 20241122174416.1367052-3-surenb@google.com (mailing list archive) |
---|---|
State | New |
Series | [v3,1/3] seqlock: add raw_seqcount_try_begin |
On Fri, Nov 22, 2024 at 09:44:16AM -0800, Suren Baghdasaryan wrote:
>Add helper functions to speculatively perform operations without
>read-locking mmap_lock, expecting that mmap_lock will not be
>write-locked and mm is not modified from under us.
>
>Suggested-by: Peter Zijlstra <peterz@infradead.org>
>Signed-off-by: Suren Baghdasaryan <surenb@google.com>
>Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
>---
>Changes since v2 [1]
>- Added SOB, per Liam Howlett
>
>[1] https://lore.kernel.org/all/20241121162826.987947-3-surenb@google.com/
>
> include/linux/mmap_lock.h | 33 +++++++++++++++++++++++++++++++--
> 1 file changed, 31 insertions(+), 2 deletions(-)
>
>diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
>index 9715326f5a85..8ac3041df053 100644
>--- a/include/linux/mmap_lock.h
>+++ b/include/linux/mmap_lock.h
>@@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
> }
>
> #ifdef CONFIG_PER_VMA_LOCK
>+
> static inline void mm_lock_seqcount_init(struct mm_struct *mm)
> {
> 	seqcount_init(&mm->mm_lock_seq);
>@@ -87,11 +88,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
> 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
> }
>
>-#else
>+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
>+{
>+	/*
>+	 * Since mmap_lock is a sleeping lock, and waiting for it to become
>+	 * unlocked is more or less equivalent with taking it ourselves, don't
>+	 * bother with the speculative path if mmap_lock is already write-locked
>+	 * and take the slow path, which takes the lock.
>+	 */
>+	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
>+}
>+
>+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
>+{
>+	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);

Just curious why we don't use read_seqcount_retry().

Looks this is the only user outside seqlock.h.

>+}
>+
>+#else /* CONFIG_PER_VMA_LOCK */
>+
> static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
> static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
> static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
>-#endif
>+
>+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
>+{
>+	return false;
>+}
>+
>+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
>+{
>+	return true;
>+}
>+
>+#endif /* CONFIG_PER_VMA_LOCK */
>
> static inline void mmap_init_lock(struct mm_struct *mm)
> {
>--
>2.47.0.371.ga323438b13-goog
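For context, `raw_seqcount_try_begin()` is what patch 1/3 of this series adds to seqlock.h. A minimal sketch of its semantics, paraphrased rather than quoted from that patch, is:

```c
/*
 * Paraphrased sketch of raw_seqcount_try_begin() from patch 1/3 of this
 * series (exact upstream formatting may differ). It snapshots the
 * sequence counter into *start* and succeeds only when the count is
 * even, i.e. no writer is currently inside the write section.
 */
#define raw_seqcount_try_begin(s, start)				\
({									\
	start = raw_read_seqcount(s);					\
	!(start & 1);							\
})
```

This is why `mmap_lock_speculate_try_begin()` can bail out early, as its comment describes: an odd `mm_lock_seq` means a writer holds mmap_lock, so the caller should go straight to the locked slow path instead of speculating.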
On Sun, Nov 24, 2024 at 4:58 PM Wei Yang <richard.weiyang@gmail.com> wrote:
>
> On Fri, Nov 22, 2024 at 09:44:16AM -0800, Suren Baghdasaryan wrote:
> [...]
> >+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
> >+{
> >+	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);
>
> Just curious why we don't use read_seqcount_retry().
>
> Looks this is the only user outside seqlock.h.

Ah, good eye! read_seqcount_retry() would be better.

Peter, do you want me to post a new patchset or you can patch it when
picking it up?
On Mon, Nov 25, 2024 at 08:18:38AM -0800, Suren Baghdasaryan wrote:
> > >+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
> > >+{
> > >+	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);
> >
> > Just curious why we don't use read_seqcount_retry().
> >
> > Looks this is the only user outside seqlock.h.
>
> Ah, good eye! read_seqcount_retry() would be better.
>
> Peter, do you want me to post a new patchset or you can patch it when
> picking it up?

Fixed up my local copy, thanks!
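For reference on the question settled above: in `include/linux/seqlock.h`, `read_seqcount_retry()` is a thin wrapper that resolves the seqcount variant via `seqprop_ptr()` and delegates to `do_read_seqcount_retry()`. Roughly, and paraphrased (KCSAN annotations and exact structure omitted; details vary by kernel version):

```c
/*
 * Rough paraphrase of the relevant seqlock.h definitions, not a
 * verbatim copy (KCSAN instrumentation omitted for brevity).
 */
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned int start)
{
	smp_rmb();	/* order the speculative reads before the recheck */
	return unlikely(READ_ONCE(s->sequence) != start);
}

#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_ptr(s), start)
```

Since `mm->mm_lock_seq` is a plain `seqcount_t`, the two are functionally equivalent here; `read_seqcount_retry()` is simply the documented public entry point, which is why the switch is a cosmetic fixup rather than a behavioral change.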
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 9715326f5a85..8ac3041df053 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
 	seqcount_init(&mm->mm_lock_seq);
@@ -87,11 +88,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
 }
 
-#else
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	/*
+	 * Since mmap_lock is a sleeping lock, and waiting for it to become
+	 * unlocked is more or less equivalent with taking it ourselves, don't
+	 * bother with the speculative path if mmap_lock is already write-locked
+	 * and take the slow path, which takes the lock.
+	 */
+	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return do_read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
-#endif
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+	return true;
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
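To illustrate how the two helpers are meant to be used together, here is a hypothetical caller; it is not part of this patch, and `some_value` is an invented field used purely for illustration:

```c
/*
 * Hypothetical caller, for illustration only -- not part of this patch.
 * Speculatively read per-mm state without taking mmap_lock, falling
 * back to the locked slow path when a writer was active or raced us.
 */
static unsigned long get_mm_value(struct mm_struct *mm)
{
	unsigned long val;
	unsigned int seq;

	if (mmap_lock_speculate_try_begin(mm, &seq)) {
		/* Fast path: no mmap_lock writer observed at the snapshot. */
		val = READ_ONCE(mm->some_value);	/* hypothetical field */
		if (!mmap_lock_speculate_retry(mm, seq))
			return val;	/* sequence unchanged: read was consistent */
	}

	/* Slow path: a writer held or took mmap_lock; read under the lock. */
	mmap_read_lock(mm);
	val = mm->some_value;
	mmap_read_unlock(mm);
	return val;
}
```

Note the asymmetry of the !CONFIG_PER_VMA_LOCK stubs: `try_begin` returns false and `retry` returns true, so a caller structured this way always falls through to the locked slow path when per-VMA locking is compiled out.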