Message ID | 20240111212424.3572189-2-willy@infradead.org (mailing list archive) |
---|---|
State | Superseded, archived |
Series | Remove the XFS mrlock |
On 1/11/24 16:24, Matthew Wilcox (Oracle) wrote:
> Modelled after lockdep_assert_held() and lockdep_assert_held_write(),
> but are always active, even when lockdep is disabled. Of course, they
> don't test that _this_ thread is the owner, but it's sufficient to catch
> many bugs and doesn't incur the same performance penalty as lockdep.

I don't mind the new *assert_held*nolockdep APIs. The only nit I have is
that their behavior differs slightly from the corresponding lockdep
counterparts: they don't imply that the current process is holding the
lock. So we may need a comment documenting the difference to set the
right expectation. Of course, that can be done in a follow-up patch.

Acked-by: Waiman Long <longman@redhat.com>

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  include/linux/rwbase_rt.h |  9 ++++++--
>  include/linux/rwsem.h     | 46 ++++++++++++++++++++++++++++++++++-----
>  2 files changed, 48 insertions(+), 7 deletions(-)
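[Editor's note: to make the semantic gap Waiman describes concrete, here is a minimal sketch. It is not from the patch; example_sem, task_b_check(), and the two-task scenario are illustrative assumptions.]

/*
 * Illustrative sketch only.  Suppose task A holds example_sem for write
 * and task B then runs task_b_check().  The _nolockdep assert only
 * inspects the lock word, so it stays quiet for task B; the lockdep
 * variant (with CONFIG_LOCKDEP=y) warns because task B is not the
 * owner recorded in its held-lock list.
 */
#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void task_b_check(void)
{
	/* Passes as long as *some* task holds example_sem for write. */
	rwsem_assert_held_write_nolockdep(&example_sem);

	/* Splats here if lockdep is enabled and task B is not the holder. */
	lockdep_assert_held_write(&example_sem);
}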
On Thu, Jan 11, 2024 at 09:24:22PM +0000, Matthew Wilcox (Oracle) wrote:
> Modelled after lockdep_assert_held() and lockdep_assert_held_write(),
> but are always active, even when lockdep is disabled. Of course, they
> don't test that _this_ thread is the owner, but it's sufficient to catch
> many bugs and doesn't incur the same performance penalty as lockdep.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Looks good to me, having read this patchset backwards.

Acked-by: Darrick J. Wong <djwong@kernel.org>

--D
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
index 1d264dd08625..29c4e4f243e4 100644
--- a/include/linux/rwbase_rt.h
+++ b/include/linux/rwbase_rt.h
@@ -26,12 +26,17 @@ struct rwbase_rt {
 	} while (0)
 
 
-static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
 {
 	return atomic_read(&rwb->readers) != READER_BIAS;
 }
 
-static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+{
+	WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+}
+
+static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
 {
 	return atomic_read(&rwb->readers) > 0;
 }
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 9c29689ff505..4f1c18992f76 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -66,14 +66,24 @@ struct rw_semaphore {
 #endif
 };
 
-/* In all implementations count != 0 means locked */
+#define RWSEM_UNLOCKED_VALUE		0UL
+#define RWSEM_WRITER_LOCKED		(1UL << 0)
+#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-	return atomic_long_read(&sem->count) != 0;
+	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
 }
 
-#define RWSEM_UNLOCKED_VALUE		0L
-#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
+}
+
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
+}
 
 /* Common initializer macros and functions */
 
@@ -152,11 +162,21 @@ do {						\
 	__init_rwsem((sem), #sem, &__key);	\
 } while (0)
 
-static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
 {
 	return rw_base_is_locked(&sem->rwbase);
 }
 
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+	WARN_ON(!rwsem_is_locked(sem));
+}
+
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+	rw_base_assert_held_write(&sem->rwbase);
+}
+
 static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
 {
 	return rw_base_is_contended(&sem->rwbase);
@@ -169,6 +189,22 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
  * the RT specific variant.
  */
 
+static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		lockdep_assert_held(sem);
+	else
+		rwsem_assert_held_nolockdep(sem);
+}
+
+static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		lockdep_assert_held_write(sem);
+	else
+		rwsem_assert_held_write_nolockdep(sem);
+}
+
 /*
  * lock for reading
  */
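[Editor's note: as a rough idea of how a caller might use the new helpers once later patches in the series drop the mrlock, here is a hedged sketch. example_assert_inode_write_locked(), example_truncate(), and the choice of i_rwsem are illustrative assumptions, not code from this series.]

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/rwsem.h>

/*
 * Illustrative only: an assertion a filesystem might place where it
 * previously used an mrlock-style "is write-locked" check.
 * rwsem_assert_held_write() uses lockdep's ownership tracking when
 * CONFIG_LOCKDEP=y and falls back to the cheap lock-word test
 * otherwise, so the check is never compiled out entirely.
 */
static void example_assert_inode_write_locked(struct inode *inode)
{
	rwsem_assert_held_write(&inode->i_rwsem);
}

static void example_truncate(struct inode *inode, loff_t newsize)
{
	example_assert_inode_write_locked(inode);
	truncate_setsize(inode, newsize);
}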