| Message ID | 20241028010818.2487581-3-andrii@kernel.org (mailing list archive) |
|---|---|
| State | New |
| Series | uprobes,mm: speculative lockless VMA-to-uprobe lookup |
On 10/28/24 02:08, Andrii Nakryiko wrote:
> From: Suren Baghdasaryan <surenb@google.com>
>
> Add helper functions to speculatively perform operations without
> read-locking mmap_lock, expecting that mmap_lock will not be
> write-locked and mm is not modified from under us.
>
> Suggested-by: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  include/linux/mmap_lock.h | 29 +++++++++++++++++++++++++++--
>  1 file changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 6b3272686860..58dde2e35f7e 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
>  }
>
>  #ifdef CONFIG_PER_VMA_LOCK
> +
>  static inline void mm_lock_seqcount_init(struct mm_struct *mm)
>  {
>  	seqcount_init(&mm->mm_lock_seq);
> @@ -86,11 +87,35 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
>  	do_raw_write_seqcount_end(&mm->mm_lock_seq);
>  }
>
> -#else
> +static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> +	*seq = raw_read_seqcount(&mm->mm_lock_seq);
> +	/* Allow speculation if mmap_lock is not write-locked */
> +	return (*seq & 1) == 0;
> +}
> +
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
> +{
> +	return !do_read_seqcount_retry(&mm->mm_lock_seq, seq);
> +}
> +
> +#else /* CONFIG_PER_VMA_LOCK */
> +
>  static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
>  static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
>  static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
> -#endif
> +
> +static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> +	return false;
> +}
> +
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
> +{
> +	return false;
> +}
> +
> +#endif /* CONFIG_PER_VMA_LOCK */
>
>  static inline void mmap_init_lock(struct mm_struct *mm)
>  {
```diff
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 6b3272686860..58dde2e35f7e 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
 	seqcount_init(&mm->mm_lock_seq);
@@ -86,11 +87,35 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
 	do_raw_write_seqcount_end(&mm->mm_lock_seq);
 }
 
-#else
+static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	*seq = raw_read_seqcount(&mm->mm_lock_seq);
+	/* Allow speculation if mmap_lock is not write-locked */
+	return (*seq & 1) == 0;
+}
+
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
+{
+	return !do_read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
 static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
-#endif
+
+static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
+{
+	return false;
+}
+
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
+{
+	return false;
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
```
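
For readers following along, below is a minimal usage sketch of the new begin/end pair, not part of this patch: the caller name lookup_speculative() and the fallback policy are hypothetical, loosely modeled on how a speculative VMA-to-uprobe lookup in this series might consume the API. mmap_lock_speculation_begin() refuses to speculate while the sequence count is odd (a writer holds mmap_lock for write), and mmap_lock_speculation_end() re-checks the count so any intervening write invalidates whatever was read speculatively.

```c
/*
 * Hypothetical caller, for illustration only: try a lockless,
 * speculative read first, and let the caller fall back to the
 * mmap_read_lock() path if speculation was not possible or a
 * concurrent writer raced with us.
 */
static bool lookup_speculative(struct mm_struct *mm, unsigned long addr)
{
	unsigned int seq;
	bool valid = false;

	if (mmap_lock_speculation_begin(mm, &seq)) {
		/*
		 * Read VMA state for @addr here without mmap_lock.
		 * The data may be stale or inconsistent and must not
		 * be acted upon until validated below.
		 */
		valid = mmap_lock_speculation_end(mm, seq);
	}

	return valid;	/* on false, caller retries under mmap_read_lock() */
}
```

The point of the split API is that the speculative section itself takes no locks at all; the only cost on the fast path is two reads of mm->mm_lock_seq, and correctness relies on the caller treating everything read in between as provisional until the end() check succeeds.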