Message ID | 20190715143705.117908-3-joel@joelfernandes.org (mailing list archive)
---|---
State | Not Applicable, archived
Series | Harden list_for_each_entry_rcu() and family
On Mon, Jul 15, 2019 at 10:36:58AM -0400, Joel Fernandes (Google) wrote: > This patch adds support for checking RCU reader sections in list > traversal macros. Optionally, if the list macro is called under SRCU or > other lock/mutex protection, then appropriate lockdep expressions can be > passed to make the checks pass. > > Existing list_for_each_entry_rcu() invocations don't need to pass the > optional fourth argument (cond) unless they are under some non-RCU > protection and needs to make lockdep check pass. > > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org> Now that I am on the correct version, again please fold in the checks for the extra argument. The ability to have an optional argument looks quite helpful, especially when compared to growing the RCU API! A few more things below. > --- > include/linux/rculist.h | 28 ++++++++++++++++++++----- > include/linux/rcupdate.h | 7 +++++++ > kernel/rcu/Kconfig.debug | 11 ++++++++++ > kernel/rcu/update.c | 44 ++++++++++++++++++++++++---------------- > 4 files changed, 67 insertions(+), 23 deletions(-) > > diff --git a/include/linux/rculist.h b/include/linux/rculist.h > index e91ec9ddcd30..1048160625bb 100644 > --- a/include/linux/rculist.h > +++ b/include/linux/rculist.h > @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) > */ > #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) > > +/* > + * Check during list traversal that we are within an RCU reader > + */ > + > +#ifdef CONFIG_PROVE_RCU_LIST This new Kconfig option is OK temporarily, but unless there is reason to fear malfunction that a few weeks of rcutorture, 0day, and -next won't find, it would be better to just use CONFIG_PROVE_RCU. The overall goal is to reduce the number of RCU knobs rather than grow them, must though history might lead one to believe otherwise. :-/ > +#define __list_check_rcu(dummy, cond, ...) \ > + ({ \ > + RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ > + "RCU-list traversed in non-reader section!"); \ > + }) > +#else > +#define __list_check_rcu(dummy, cond, ...) ({}) > +#endif > + > /* > * Insert a new entry between two known consecutive entries. > * > @@ -343,14 +357,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, > * @pos: the type * to use as a loop cursor. > * @head: the head for your list. > * @member: the name of the list_head within the struct. > + * @cond: optional lockdep expression if called from non-RCU protection. > * > * This list-traversal primitive may safely run concurrently with > * the _rcu list-mutation primitives such as list_add_rcu() > * as long as the traversal is guarded by rcu_read_lock(). > */ > -#define list_for_each_entry_rcu(pos, head, member) \ > - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > - &pos->member != (head); \ > +#define list_for_each_entry_rcu(pos, head, member, cond...) \ > + for (__list_check_rcu(dummy, ## cond, 0), \ > + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > + &pos->member != (head); \ > pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) > > /** > @@ -616,13 +632,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, > * @pos: the type * to use as a loop cursor. > * @head: the head for your list. > * @member: the name of the hlist_node within the struct. > + * @cond: optional lockdep expression if called from non-RCU protection. 
> * > * This list-traversal primitive may safely run concurrently with > * the _rcu list-mutation primitives such as hlist_add_head_rcu() > * as long as the traversal is guarded by rcu_read_lock(). > */ > -#define hlist_for_each_entry_rcu(pos, head, member) \ > - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ > + for (__list_check_rcu(dummy, ## cond, 0), \ > + pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > typeof(*(pos)), member); \ > pos; \ > pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ > diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h > index 8f7167478c1d..f3c29efdf19a 100644 > --- a/include/linux/rcupdate.h > +++ b/include/linux/rcupdate.h > @@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void); > int rcu_read_lock_held(void); > int rcu_read_lock_bh_held(void); > int rcu_read_lock_sched_held(void); > +int rcu_read_lock_any_held(void); > > #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > @@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void) > { > return !preemptible(); > } > + > +static inline int rcu_read_lock_any_held(void) > +{ > + return !preemptible(); > +} > + > #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > #ifdef CONFIG_PROVE_RCU > diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug > index 5ec3ea4028e2..7fbd21dbfcd0 100644 > --- a/kernel/rcu/Kconfig.debug > +++ b/kernel/rcu/Kconfig.debug > @@ -8,6 +8,17 @@ menu "RCU Debugging" > config PROVE_RCU > def_bool PROVE_LOCKING > > +config PROVE_RCU_LIST > + bool "RCU list lockdep debugging" > + depends on PROVE_RCU This must also depend on RCU_EXPERT. > + default n > + help > + Enable RCU lockdep checking for list usages. By default it is > + turned off since there are several list RCU users that still > + need to be converted to pass a lockdep expression. To prevent > + false-positive splats, we keep it default disabled but once all > + users are converted, we can remove this config option. > + > config TORTURE_TEST > tristate > default n > diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c > index 9dd5aeef6e70..b7a4e3b5fa98 100644 > --- a/kernel/rcu/update.c > +++ b/kernel/rcu/update.c > @@ -91,14 +91,18 @@ module_param(rcu_normal_after_boot, int, 0); > * Similarly, we avoid claiming an SRCU read lock held if the current > * CPU is offline. > */ > +#define rcu_read_lock_held_common() \ > + if (!debug_lockdep_rcu_enabled()) \ > + return 1; \ > + if (!rcu_is_watching()) \ > + return 0; \ > + if (!rcu_lockdep_current_cpu_online()) \ > + return 0; Nice abstraction of common code! 
Thanx, Paul > + > int rcu_read_lock_sched_held(void) > { > - if (!debug_lockdep_rcu_enabled()) > - return 1; > - if (!rcu_is_watching()) > - return 0; > - if (!rcu_lockdep_current_cpu_online()) > - return 0; > + rcu_read_lock_held_common(); > + > return lock_is_held(&rcu_sched_lock_map) || !preemptible(); > } > EXPORT_SYMBOL(rcu_read_lock_sched_held); > @@ -257,12 +261,8 @@ NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled); > */ > int rcu_read_lock_held(void) > { > - if (!debug_lockdep_rcu_enabled()) > - return 1; > - if (!rcu_is_watching()) > - return 0; > - if (!rcu_lockdep_current_cpu_online()) > - return 0; > + rcu_read_lock_held_common(); > + > return lock_is_held(&rcu_lock_map); > } > EXPORT_SYMBOL_GPL(rcu_read_lock_held); > @@ -284,16 +284,24 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held); > */ > int rcu_read_lock_bh_held(void) > { > - if (!debug_lockdep_rcu_enabled()) > - return 1; > - if (!rcu_is_watching()) > - return 0; > - if (!rcu_lockdep_current_cpu_online()) > - return 0; > + rcu_read_lock_held_common(); > + > return in_softirq() || irqs_disabled(); > } > EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); > > +int rcu_read_lock_any_held(void) > +{ > + rcu_read_lock_held_common(); > + > + if (lock_is_held(&rcu_lock_map) || > + lock_is_held(&rcu_bh_lock_map) || > + lock_is_held(&rcu_sched_lock_map)) > + return 1; > + return !preemptible(); > +} > +EXPORT_SYMBOL_GPL(rcu_read_lock_any_held); > + > #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > /** > -- > 2.22.0.510.g264f2c817a-goog >
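As a concrete illustration of the optional argument under discussion, here is a minimal sketch of the two calling styles the patch enables. The structure, list, and mutex names are hypothetical and exist only for this example; the only things assumed are the patched list_for_each_entry_rcu() and the existing lockdep_is_held()/lockdep_assert_held() helpers.

#include <linux/rculist.h>
#include <linux/mutex.h>

/* Hypothetical subsystem state, for illustration only. */
struct foo {
        int val;
        struct list_head list;
};

static LIST_HEAD(foo_head);
static DEFINE_MUTEX(foo_mutex);        /* updates and some readers hold this */

/* Reader inside rcu_read_lock(): no fourth argument needed. */
static int foo_sum_rcu(void)
{
        struct foo *f;
        int sum = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(f, &foo_head, list)
                sum += f->val;
        rcu_read_unlock();
        return sum;
}

/* Reader protected by foo_mutex rather than RCU: pass a lockdep
 * expression so the new __list_check_rcu() check does not splat. */
static int foo_sum_locked(void)
{
        struct foo *f;
        int sum = 0;

        lockdep_assert_held(&foo_mutex);
        list_for_each_entry_rcu(f, &foo_head, list,
                                lockdep_is_held(&foo_mutex))
                sum += f->val;
        return sum;
}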
On Tue, Jul 16, 2019 at 11:38:33AM -0700, Paul E. McKenney wrote: > On Mon, Jul 15, 2019 at 10:36:58AM -0400, Joel Fernandes (Google) wrote: > > This patch adds support for checking RCU reader sections in list > > traversal macros. Optionally, if the list macro is called under SRCU or > > other lock/mutex protection, then appropriate lockdep expressions can be > > passed to make the checks pass. > > > > Existing list_for_each_entry_rcu() invocations don't need to pass the > > optional fourth argument (cond) unless they are under some non-RCU > > protection and needs to make lockdep check pass. > > > > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org> > > Now that I am on the correct version, again please fold in the checks > for the extra argument. The ability to have an optional argument looks > quite helpful, especially when compared to growing the RCU API! I did fold this and replied with a pull request URL based on /dev branch. But we can hold off on the pull requests until we decide on the below comments: > A few more things below. > > --- > > include/linux/rculist.h | 28 ++++++++++++++++++++----- > > include/linux/rcupdate.h | 7 +++++++ > > kernel/rcu/Kconfig.debug | 11 ++++++++++ > > kernel/rcu/update.c | 44 ++++++++++++++++++++++++---------------- > > 4 files changed, 67 insertions(+), 23 deletions(-) > > > > diff --git a/include/linux/rculist.h b/include/linux/rculist.h > > index e91ec9ddcd30..1048160625bb 100644 > > --- a/include/linux/rculist.h > > +++ b/include/linux/rculist.h > > @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) > > */ > > #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) > > > > +/* > > + * Check during list traversal that we are within an RCU reader > > + */ > > + > > +#ifdef CONFIG_PROVE_RCU_LIST > > This new Kconfig option is OK temporarily, but unless there is reason to > fear malfunction that a few weeks of rcutorture, 0day, and -next won't > find, it would be better to just use CONFIG_PROVE_RCU. The overall goal > is to reduce the number of RCU knobs rather than grow them, must though > history might lead one to believe otherwise. :-/ If you want, we can try to drop this option and just use PROVE_RCU however I must say there may be several warnings that need to be fixed in a short period of time (even a few weeks may be too short) considering the 1000+ uses of RCU lists. But I don't mind dropping it and it may just accelerate the fixing up of all callers. > > +#define __list_check_rcu(dummy, cond, ...) \ > > + ({ \ > > + RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ > > + "RCU-list traversed in non-reader section!"); \ > > + }) > > +#else > > +#define __list_check_rcu(dummy, cond, ...) ({}) > > +#endif > > + > > /* > > * Insert a new entry between two known consecutive entries. > > * > > @@ -343,14 +357,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, > > * @pos: the type * to use as a loop cursor. > > * @head: the head for your list. > > * @member: the name of the list_head within the struct. > > + * @cond: optional lockdep expression if called from non-RCU protection. > > * > > * This list-traversal primitive may safely run concurrently with > > * the _rcu list-mutation primitives such as list_add_rcu() > > * as long as the traversal is guarded by rcu_read_lock(). 
> > */ > > -#define list_for_each_entry_rcu(pos, head, member) \ > > - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > > - &pos->member != (head); \ > > +#define list_for_each_entry_rcu(pos, head, member, cond...) \ > > + for (__list_check_rcu(dummy, ## cond, 0), \ > > + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > > + &pos->member != (head); \ > > pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) > > > > /** > > @@ -616,13 +632,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, > > * @pos: the type * to use as a loop cursor. > > * @head: the head for your list. > > * @member: the name of the hlist_node within the struct. > > + * @cond: optional lockdep expression if called from non-RCU protection. > > * > > * This list-traversal primitive may safely run concurrently with > > * the _rcu list-mutation primitives such as hlist_add_head_rcu() > > * as long as the traversal is guarded by rcu_read_lock(). > > */ > > -#define hlist_for_each_entry_rcu(pos, head, member) \ > > - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > > +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ > > + for (__list_check_rcu(dummy, ## cond, 0), \ > > + pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > > typeof(*(pos)), member); \ > > pos; \ > > pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ > > diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h > > index 8f7167478c1d..f3c29efdf19a 100644 > > --- a/include/linux/rcupdate.h > > +++ b/include/linux/rcupdate.h > > @@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void); > > int rcu_read_lock_held(void); > > int rcu_read_lock_bh_held(void); > > int rcu_read_lock_sched_held(void); > > +int rcu_read_lock_any_held(void); > > > > #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > > > @@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void) > > { > > return !preemptible(); > > } > > + > > +static inline int rcu_read_lock_any_held(void) > > +{ > > + return !preemptible(); > > +} > > + > > #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > > > #ifdef CONFIG_PROVE_RCU > > diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug > > index 5ec3ea4028e2..7fbd21dbfcd0 100644 > > --- a/kernel/rcu/Kconfig.debug > > +++ b/kernel/rcu/Kconfig.debug > > @@ -8,6 +8,17 @@ menu "RCU Debugging" > > config PROVE_RCU > > def_bool PROVE_LOCKING > > > > +config PROVE_RCU_LIST > > + bool "RCU list lockdep debugging" > > + depends on PROVE_RCU > > This must also depend on RCU_EXPERT. Sure. > > + default n > > + help > > + Enable RCU lockdep checking for list usages. By default it is > > + turned off since there are several list RCU users that still > > + need to be converted to pass a lockdep expression. To prevent > > + false-positive splats, we keep it default disabled but once all > > + users are converted, we can remove this config option. > > + > > config TORTURE_TEST > > tristate > > default n > > diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c > > index 9dd5aeef6e70..b7a4e3b5fa98 100644 > > --- a/kernel/rcu/update.c > > +++ b/kernel/rcu/update.c > > @@ -91,14 +91,18 @@ module_param(rcu_normal_after_boot, int, 0); > > * Similarly, we avoid claiming an SRCU read lock held if the current > > * CPU is offline. 
> > */ > > +#define rcu_read_lock_held_common() \ > > + if (!debug_lockdep_rcu_enabled()) \ > > + return 1; \ > > + if (!rcu_is_watching()) \ > > + return 0; \ > > + if (!rcu_lockdep_current_cpu_online()) \ > > + return 0; > > Nice abstraction of common code! Thanks!
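For reference, the Kconfig entry with the dependency requested above might look like the sketch below. This is not the final form, only the patch's own entry with the RCU_EXPERT dependency added as Paul asks.

config PROVE_RCU_LIST
        bool "RCU list lockdep debugging"
        depends on PROVE_RCU && RCU_EXPERT
        default n
        help
          Enable RCU lockdep checking for list usages. By default it is
          turned off since there are several list RCU users that still
          need to be converted to pass a lockdep expression. Once all
          users are converted, this option can be removed.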
On Tue, Jul 16, 2019 at 02:46:49PM -0400, Joel Fernandes wrote: > On Tue, Jul 16, 2019 at 11:38:33AM -0700, Paul E. McKenney wrote: > > On Mon, Jul 15, 2019 at 10:36:58AM -0400, Joel Fernandes (Google) wrote: > > > This patch adds support for checking RCU reader sections in list > > > traversal macros. Optionally, if the list macro is called under SRCU or > > > other lock/mutex protection, then appropriate lockdep expressions can be > > > passed to make the checks pass. > > > > > > Existing list_for_each_entry_rcu() invocations don't need to pass the > > > optional fourth argument (cond) unless they are under some non-RCU > > > protection and needs to make lockdep check pass. > > > > > > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org> > > > > Now that I am on the correct version, again please fold in the checks > > for the extra argument. The ability to have an optional argument looks > > quite helpful, especially when compared to growing the RCU API! > > I did fold this and replied with a pull request URL based on /dev branch. But > we can hold off on the pull requests until we decide on the below comments: > > > A few more things below. > > > --- > > > include/linux/rculist.h | 28 ++++++++++++++++++++----- > > > include/linux/rcupdate.h | 7 +++++++ > > > kernel/rcu/Kconfig.debug | 11 ++++++++++ > > > kernel/rcu/update.c | 44 ++++++++++++++++++++++++---------------- > > > 4 files changed, 67 insertions(+), 23 deletions(-) > > > > > > diff --git a/include/linux/rculist.h b/include/linux/rculist.h > > > index e91ec9ddcd30..1048160625bb 100644 > > > --- a/include/linux/rculist.h > > > +++ b/include/linux/rculist.h > > > @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) > > > */ > > > #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) > > > > > > +/* > > > + * Check during list traversal that we are within an RCU reader > > > + */ > > > + > > > +#ifdef CONFIG_PROVE_RCU_LIST > > > > This new Kconfig option is OK temporarily, but unless there is reason to > > fear malfunction that a few weeks of rcutorture, 0day, and -next won't > > find, it would be better to just use CONFIG_PROVE_RCU. The overall goal > > is to reduce the number of RCU knobs rather than grow them, must though > > history might lead one to believe otherwise. :-/ > > If you want, we can try to drop this option and just use PROVE_RCU however I > must say there may be several warnings that need to be fixed in a short > period of time (even a few weeks may be too short) considering the 1000+ > uses of RCU lists. Do many people other than me build with CONFIG_PROVE_RCU? If so, then that would be a good reason for a temporary CONFIG_PROVE_RCU_LIST, as in going away in a release or two once the warnings get fixed. > But I don't mind dropping it and it may just accelerate the fixing up of all > callers. I will let you decide based on the above question. But if you have CONFIG_PROVE_RCU_LIST, as noted below, it needs to depend on RCU_EXPERT. Thanx, Paul > > > +#define __list_check_rcu(dummy, cond, ...) \ > > > + ({ \ > > > + RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ > > > + "RCU-list traversed in non-reader section!"); \ > > > + }) > > > +#else > > > +#define __list_check_rcu(dummy, cond, ...) ({}) > > > +#endif > > > + > > > /* > > > * Insert a new entry between two known consecutive entries. > > > * > > > @@ -343,14 +357,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, > > > * @pos: the type * to use as a loop cursor. 
> > > * @head: the head for your list. > > > * @member: the name of the list_head within the struct. > > > + * @cond: optional lockdep expression if called from non-RCU protection. > > > * > > > * This list-traversal primitive may safely run concurrently with > > > * the _rcu list-mutation primitives such as list_add_rcu() > > > * as long as the traversal is guarded by rcu_read_lock(). > > > */ > > > -#define list_for_each_entry_rcu(pos, head, member) \ > > > - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > > > - &pos->member != (head); \ > > > +#define list_for_each_entry_rcu(pos, head, member, cond...) \ > > > + for (__list_check_rcu(dummy, ## cond, 0), \ > > > + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ > > > + &pos->member != (head); \ > > > pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) > > > > > > /** > > > @@ -616,13 +632,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, > > > * @pos: the type * to use as a loop cursor. > > > * @head: the head for your list. > > > * @member: the name of the hlist_node within the struct. > > > + * @cond: optional lockdep expression if called from non-RCU protection. > > > * > > > * This list-traversal primitive may safely run concurrently with > > > * the _rcu list-mutation primitives such as hlist_add_head_rcu() > > > * as long as the traversal is guarded by rcu_read_lock(). > > > */ > > > -#define hlist_for_each_entry_rcu(pos, head, member) \ > > > - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > > > +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ > > > + for (__list_check_rcu(dummy, ## cond, 0), \ > > > + pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ > > > typeof(*(pos)), member); \ > > > pos; \ > > > pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ > > > diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h > > > index 8f7167478c1d..f3c29efdf19a 100644 > > > --- a/include/linux/rcupdate.h > > > +++ b/include/linux/rcupdate.h > > > @@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void); > > > int rcu_read_lock_held(void); > > > int rcu_read_lock_bh_held(void); > > > int rcu_read_lock_sched_held(void); > > > +int rcu_read_lock_any_held(void); > > > > > > #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > > > > > @@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void) > > > { > > > return !preemptible(); > > > } > > > + > > > +static inline int rcu_read_lock_any_held(void) > > > +{ > > > + return !preemptible(); > > > +} > > > + > > > #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ > > > > > > #ifdef CONFIG_PROVE_RCU > > > diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug > > > index 5ec3ea4028e2..7fbd21dbfcd0 100644 > > > --- a/kernel/rcu/Kconfig.debug > > > +++ b/kernel/rcu/Kconfig.debug > > > @@ -8,6 +8,17 @@ menu "RCU Debugging" > > > config PROVE_RCU > > > def_bool PROVE_LOCKING > > > > > > +config PROVE_RCU_LIST > > > + bool "RCU list lockdep debugging" > > > + depends on PROVE_RCU > > > > This must also depend on RCU_EXPERT. > > Sure. > > > > + default n > > > + help > > > + Enable RCU lockdep checking for list usages. By default it is > > > + turned off since there are several list RCU users that still > > > + need to be converted to pass a lockdep expression. To prevent > > > + false-positive splats, we keep it default disabled but once all > > > + users are converted, we can remove this config option. 
> > > + > > > config TORTURE_TEST > > > tristate > > > default n > > > diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c > > > index 9dd5aeef6e70..b7a4e3b5fa98 100644 > > > --- a/kernel/rcu/update.c > > > +++ b/kernel/rcu/update.c > > > @@ -91,14 +91,18 @@ module_param(rcu_normal_after_boot, int, 0); > > > * Similarly, we avoid claiming an SRCU read lock held if the current > > > * CPU is offline. > > > */ > > > +#define rcu_read_lock_held_common() \ > > > + if (!debug_lockdep_rcu_enabled()) \ > > > + return 1; \ > > > + if (!rcu_is_watching()) \ > > > + return 0; \ > > > + if (!rcu_lockdep_current_cpu_online()) \ > > > + return 0; > > > > Nice abstraction of common code! > > Thanks! >
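For readers wondering how the fourth argument manages to be optional, a small user-space toy (independent of the kernel headers, all names invented for the example) shows the ", ## cond, 0" trick used by __list_check_rcu(): with no extra argument the trailing 0 becomes the condition, and a supplied expression displaces it, the 0 being absorbed by the "..." parameter. It relies on the same GNU comma-paste extension the kernel macro uses.

#include <stdio.h>

/* Toy stand-in for __list_check_rcu(): warn when the condition is false. */
#define check_reader(dummy, cond, ...)                          \
        do {                                                    \
                if (!(cond))                                    \
                        printf("warning: no protection claimed\n"); \
        } while (0)

/* Toy stand-in for the list macro's argument plumbing: with no cond
 * argument, "dummy, ## cond, 0" collapses to "dummy, 0", so the default
 * 0 is checked; with a cond argument, that expression is checked and
 * the trailing 0 lands in the variadic tail. */
#define traverse(cond...) check_reader(dummy, ## cond, 0)

int main(void)
{
        int have_lock = 1;

        traverse();             /* no optional argument: warns */
        traverse(have_lock);    /* optional argument supplied: silent */
        return 0;
}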
On Tue, Jul 16, 2019 at 11:53:03AM -0700, Paul E. McKenney wrote: [snip] > > > A few more things below. > > > > --- > > > > include/linux/rculist.h | 28 ++++++++++++++++++++----- > > > > include/linux/rcupdate.h | 7 +++++++ > > > > kernel/rcu/Kconfig.debug | 11 ++++++++++ > > > > kernel/rcu/update.c | 44 ++++++++++++++++++++++++---------------- > > > > 4 files changed, 67 insertions(+), 23 deletions(-) > > > > > > > > diff --git a/include/linux/rculist.h b/include/linux/rculist.h > > > > index e91ec9ddcd30..1048160625bb 100644 > > > > --- a/include/linux/rculist.h > > > > +++ b/include/linux/rculist.h > > > > @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) > > > > */ > > > > #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) > > > > > > > > +/* > > > > + * Check during list traversal that we are within an RCU reader > > > > + */ > > > > + > > > > +#ifdef CONFIG_PROVE_RCU_LIST > > > > > > This new Kconfig option is OK temporarily, but unless there is reason to > > > fear malfunction that a few weeks of rcutorture, 0day, and -next won't > > > find, it would be better to just use CONFIG_PROVE_RCU. The overall goal > > > is to reduce the number of RCU knobs rather than grow them, must though > > > history might lead one to believe otherwise. :-/ > > > > If you want, we can try to drop this option and just use PROVE_RCU however I > > must say there may be several warnings that need to be fixed in a short > > period of time (even a few weeks may be too short) considering the 1000+ > > uses of RCU lists. > Do many people other than me build with CONFIG_PROVE_RCU? If so, then > that would be a good reason for a temporary CONFIG_PROVE_RCU_LIST, > as in going away in a release or two once the warnings get fixed. PROVE_RCU is enabled by default with PROVE_LOCKING, so it is used quite heavilty. > > But I don't mind dropping it and it may just accelerate the fixing up of all > > callers. > > I will let you decide based on the above question. But if you have > CONFIG_PROVE_RCU_LIST, as noted below, it needs to depend on RCU_EXPERT. Ok, will make it depend. But yes for temporary purpose, I will leave it as a config and remove it later. thanks, - Joel
On Tue, Jul 16, 2019 at 06:02:05PM -0400, Joel Fernandes wrote: > On Tue, Jul 16, 2019 at 11:53:03AM -0700, Paul E. McKenney wrote: > [snip] > > > > A few more things below. > > > > > --- > > > > > include/linux/rculist.h | 28 ++++++++++++++++++++----- > > > > > include/linux/rcupdate.h | 7 +++++++ > > > > > kernel/rcu/Kconfig.debug | 11 ++++++++++ > > > > > kernel/rcu/update.c | 44 ++++++++++++++++++++++++---------------- > > > > > 4 files changed, 67 insertions(+), 23 deletions(-) > > > > > > > > > > diff --git a/include/linux/rculist.h b/include/linux/rculist.h > > > > > index e91ec9ddcd30..1048160625bb 100644 > > > > > --- a/include/linux/rculist.h > > > > > +++ b/include/linux/rculist.h > > > > > @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) > > > > > */ > > > > > #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) > > > > > > > > > > +/* > > > > > + * Check during list traversal that we are within an RCU reader > > > > > + */ > > > > > + > > > > > +#ifdef CONFIG_PROVE_RCU_LIST > > > > > > > > This new Kconfig option is OK temporarily, but unless there is reason to > > > > fear malfunction that a few weeks of rcutorture, 0day, and -next won't > > > > find, it would be better to just use CONFIG_PROVE_RCU. The overall goal > > > > is to reduce the number of RCU knobs rather than grow them, must though > > > > history might lead one to believe otherwise. :-/ > > > > > > If you want, we can try to drop this option and just use PROVE_RCU however I > > > must say there may be several warnings that need to be fixed in a short > > > period of time (even a few weeks may be too short) considering the 1000+ > > > uses of RCU lists. > > Do many people other than me build with CONFIG_PROVE_RCU? If so, then > > that would be a good reason for a temporary CONFIG_PROVE_RCU_LIST, > > as in going away in a release or two once the warnings get fixed. > > PROVE_RCU is enabled by default with PROVE_LOCKING, so it is used quite > heavilty. > > > > But I don't mind dropping it and it may just accelerate the fixing up of all > > > callers. > > > > I will let you decide based on the above question. But if you have > > CONFIG_PROVE_RCU_LIST, as noted below, it needs to depend on RCU_EXPERT. > > Ok, will make it depend. But yes for temporary purpose, I will leave it as a > config and remove it later. Very good, thank you! Plus you got another ack. ;-) Thanx, Paul
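Before the raw patch, one more usage sketch grounded in the commit message's mention of SRCU: a traversal whose protection is an SRCU read-side critical section rather than RCU. The SRCU domain, list, and function names are hypothetical; srcu_read_lock_held() is the existing SRCU lockdep helper.

#include <linux/rculist.h>
#include <linux/srcu.h>

/* Hypothetical SRCU-protected reader, for illustration only. */
DEFINE_STATIC_SRCU(bar_srcu);
static LIST_HEAD(bar_head);

struct bar {
        int val;
        struct list_head list;
};

static void bar_walk(void)
{
        struct bar *b;
        int idx;

        idx = srcu_read_lock(&bar_srcu);
        /* Not an RCU reader, so tell lockdep which SRCU domain protects us. */
        list_for_each_entry_rcu(b, &bar_head, list,
                                srcu_read_lock_held(&bar_srcu))
                pr_info("bar: %d\n", b->val);
        srcu_read_unlock(&bar_srcu, idx);
}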
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e91ec9ddcd30..1048160625bb 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
  */
 #define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
 
+/*
+ * Check during list traversal that we are within an RCU reader
+ */
+
+#ifdef CONFIG_PROVE_RCU_LIST
+#define __list_check_rcu(dummy, cond, ...)				\
+	({								\
+	RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(),		\
+			 "RCU-list traversed in non-reader section!");	\
+	})
+#else
+#define __list_check_rcu(dummy, cond, ...) ({})
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -343,14 +357,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the list_head within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as list_add_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define list_for_each_entry_rcu(pos, head, member) \
-	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
-		&pos->member != (head); \
+#define list_for_each_entry_rcu(pos, head, member, cond...)		\
+	for (__list_check_rcu(dummy, ## cond, 0),			\
+	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
+		&pos->member != (head);					\
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
 /**
@@ -616,13 +632,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(pos, head, member)			\
-	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
+	for (__list_check_rcu(dummy, ## cond, 0),			\
+	     pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
 			typeof(*(pos)), member);			\
 		pos;							\
 		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8f7167478c1d..f3c29efdf19a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void);
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void)
 {
 	return !preemptible();
 }
+
+static inline int rcu_read_lock_any_held(void)
+{
+	return !preemptible();
+}
+
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug
index 5ec3ea4028e2..7fbd21dbfcd0 100644
--- a/kernel/rcu/Kconfig.debug
+++ b/kernel/rcu/Kconfig.debug
@@ -8,6 +8,17 @@ menu "RCU Debugging"
 config PROVE_RCU
 	def_bool PROVE_LOCKING
 
+config PROVE_RCU_LIST
+	bool "RCU list lockdep debugging"
+	depends on PROVE_RCU
+	default n
+	help
+	  Enable RCU lockdep checking for list usages. By default it is
+	  turned off since there are several list RCU users that still
+	  need to be converted to pass a lockdep expression. To prevent
+	  false-positive splats, we keep it default disabled but once all
+	  users are converted, we can remove this config option.
+
 config TORTURE_TEST
 	tristate
 	default n
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 9dd5aeef6e70..b7a4e3b5fa98 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -91,14 +91,18 @@ module_param(rcu_normal_after_boot, int, 0);
  * Similarly, we avoid claiming an SRCU read lock held if the current
  * CPU is offline.
  */
+#define rcu_read_lock_held_common()		\
+	if (!debug_lockdep_rcu_enabled())	\
+		return 1;			\
+	if (!rcu_is_watching())			\
+		return 0;			\
+	if (!rcu_lockdep_current_cpu_online())	\
+		return 0;
+
 int rcu_read_lock_sched_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	rcu_read_lock_held_common();
+
 	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
 }
 EXPORT_SYMBOL(rcu_read_lock_sched_held);
@@ -257,12 +261,8 @@ NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
  */
 int rcu_read_lock_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	rcu_read_lock_held_common();
+
 	return lock_is_held(&rcu_lock_map);
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -284,16 +284,24 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
  */
 int rcu_read_lock_bh_held(void)
 {
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
+	rcu_read_lock_held_common();
+
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 
+int rcu_read_lock_any_held(void)
+{
+	rcu_read_lock_held_common();
+
+	if (lock_is_held(&rcu_lock_map) ||
+	    lock_is_held(&rcu_bh_lock_map) ||
+	    lock_is_held(&rcu_sched_lock_map))
+		return 1;
+	return !preemptible();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
+
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 /**
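The hlist variant gains the same optional argument. Below is a hedged sketch of a lookup whose read path is protected by a spinlock rather than RCU; the bucket, lock, and structure names are hypothetical and only the patched hlist_for_each_entry_rcu() plus the existing lockdep helpers are assumed.

#include <linux/rculist.h>
#include <linux/spinlock.h>

/* Hypothetical hash bucket whose lookups sometimes run under baz_lock
 * instead of an RCU read-side critical section. For illustration only. */
static HLIST_HEAD(baz_bucket);
static DEFINE_SPINLOCK(baz_lock);

struct baz {
        int key;
        struct hlist_node node;
};

static struct baz *baz_lookup_locked(int key)
{
        struct baz *b;

        lockdep_assert_held(&baz_lock);
        hlist_for_each_entry_rcu(b, &baz_bucket, node,
                                 lockdep_is_held(&baz_lock))
                if (b->key == key)
                        return b;
        return NULL;
}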
This patch adds support for checking RCU reader sections in list
traversal macros. Optionally, if the list macro is called under SRCU or
other lock/mutex protection, then appropriate lockdep expressions can be
passed to make the checks pass.

Existing list_for_each_entry_rcu() invocations don't need to pass the
optional fourth argument (cond) unless they are under some non-RCU
protection and need to make the lockdep check pass.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 include/linux/rculist.h  | 28 ++++++++++++++++++++-----
 include/linux/rcupdate.h |  7 +++++++
 kernel/rcu/Kconfig.debug | 11 ++++++++++
 kernel/rcu/update.c      | 44 ++++++++++++++++++++++++----------------
 4 files changed, 67 insertions(+), 23 deletions(-)
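Beyond the list macros, the patch also exports rcu_read_lock_any_held(), which callers could use directly in their own assertions. A sketch with a hypothetical helper name, mirroring the check that __list_check_rcu() performs:

#include <linux/rcupdate.h>

/* Hypothetical helper that must be called from within some RCU reader,
 * whichever flavor (RCU, RCU-bh, RCU-sched) the caller happens to use.
 * For illustration only. */
static void *get_protected_pointer(void __rcu **slot)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
                         "get_protected_pointer() called outside of any RCU reader");
        return rcu_dereference_raw(*slot);
}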