Message ID | 20231216033300.3553457-11-kent.overstreet@linux.dev (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | big header dependency cleanup targeting sched.h | expand |
On 12/15/23 22:32, Kent Overstreet wrote: > held_lock is embedded in task_struct, and we don't want sched.h pulling > in all of lockdep.h > > Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev> > --- > include/linux/lockdep.h | 57 ----------------------------------- > include/linux/lockdep_types.h | 57 +++++++++++++++++++++++++++++++++++ > 2 files changed, 57 insertions(+), 57 deletions(-) > > diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h > index dc2844b071c2..08b0d1d9d78b 100644 > --- a/include/linux/lockdep.h > +++ b/include/linux/lockdep.h > @@ -82,63 +82,6 @@ struct lock_chain { > u64 chain_key; > }; > > -#define MAX_LOCKDEP_KEYS_BITS 13 > -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) > -#define INITIAL_CHAIN_KEY -1 > - > -struct held_lock { > - /* > - * One-way hash of the dependency chain up to this point. We > - * hash the hashes step by step as the dependency chain grows. > - * > - * We use it for dependency-caching and we skip detection > - * passes and dependency-updates if there is a cache-hit, so > - * it is absolutely critical for 100% coverage of the validator > - * to have a unique key value for every unique dependency path > - * that can occur in the system, to make a unique hash value > - * as likely as possible - hence the 64-bit width. > - * > - * The task struct holds the current hash value (initialized > - * with zero), here we store the previous hash value: > - */ > - u64 prev_chain_key; > - unsigned long acquire_ip; > - struct lockdep_map *instance; > - struct lockdep_map *nest_lock; > -#ifdef CONFIG_LOCK_STAT > - u64 waittime_stamp; > - u64 holdtime_stamp; > -#endif > - /* > - * class_idx is zero-indexed; it points to the element in > - * lock_classes this held lock instance belongs to. class_idx is in > - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. 
> - */ > - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; > - /* > - * The lock-stack is unified in that the lock chains of interrupt > - * contexts nest ontop of process context chains, but we 'separate' > - * the hashes by starting with 0 if we cross into an interrupt > - * context, and we also keep do not add cross-context lock > - * dependencies - the lock usage graph walking covers that area > - * anyway, and we'd just unnecessarily increase the number of > - * dependencies otherwise. [Note: hardirq and softirq contexts > - * are separated from each other too.] > - * > - * The following field is used to detect when we cross into an > - * interrupt context: > - */ > - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ > - unsigned int trylock:1; /* 16 bits */ > - > - unsigned int read:2; /* see lock_acquire() comment */ > - unsigned int check:1; /* see lock_acquire() comment */ > - unsigned int hardirqs_off:1; > - unsigned int sync:1; > - unsigned int references:11; /* 32 bits */ > - unsigned int pin_count; > -}; > - > /* > * Initialization, self-test and debugging-output methods: > */ > diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h > index 2ebc323d345a..9c533c8d701e 100644 > --- a/include/linux/lockdep_types.h > +++ b/include/linux/lockdep_types.h > @@ -198,6 +198,63 @@ struct lockdep_map { > > struct pin_cookie { unsigned int val; }; > > +#define MAX_LOCKDEP_KEYS_BITS 13 > +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) > +#define INITIAL_CHAIN_KEY -1 > + > +struct held_lock { > + /* > + * One-way hash of the dependency chain up to this point. We > + * hash the hashes step by step as the dependency chain grows. 
> + * > + * We use it for dependency-caching and we skip detection > + * passes and dependency-updates if there is a cache-hit, so > + * it is absolutely critical for 100% coverage of the validator > + * to have a unique key value for every unique dependency path > + * that can occur in the system, to make a unique hash value > + * as likely as possible - hence the 64-bit width. > + * > + * The task struct holds the current hash value (initialized > + * with zero), here we store the previous hash value: > + */ > + u64 prev_chain_key; > + unsigned long acquire_ip; > + struct lockdep_map *instance; > + struct lockdep_map *nest_lock; > +#ifdef CONFIG_LOCK_STAT > + u64 waittime_stamp; > + u64 holdtime_stamp; > +#endif > + /* > + * class_idx is zero-indexed; it points to the element in > + * lock_classes this held lock instance belongs to. class_idx is in > + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. > + */ > + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; > + /* > + * The lock-stack is unified in that the lock chains of interrupt > + * contexts nest on top of process context chains, but we 'separate' > + * the hashes by starting with 0 if we cross into an interrupt > + * context, and we also do not add cross-context lock > + * dependencies - the lock usage graph walking covers that area > + * anyway, and we'd just unnecessarily increase the number of > + * dependencies otherwise. [Note: hardirq and softirq contexts > + * are separated from each other too.] 
> + * > + * The following field is used to detect when we cross into an > + * interrupt context: > + */ > + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ > + unsigned int trylock:1; /* 16 bits */ > + > + unsigned int read:2; /* see lock_acquire() comment */ > + unsigned int check:1; /* see lock_acquire() comment */ > + unsigned int hardirqs_off:1; > + unsigned int sync:1; > + unsigned int references:11; /* 32 bits */ > + unsigned int pin_count; > +}; > + > #else /* !CONFIG_LOCKDEP */ > > /* Acked-by: Waiman Long <longman@redhat.com>
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index dc2844b071c2..08b0d1d9d78b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -82,63 +82,6 @@ struct lock_chain { u64 chain_key; }; -#define MAX_LOCKDEP_KEYS_BITS 13 -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) -#define INITIAL_CHAIN_KEY -1 - -struct held_lock { - /* - * One-way hash of the dependency chain up to this point. We - * hash the hashes step by step as the dependency chain grows. - * - * We use it for dependency-caching and we skip detection - * passes and dependency-updates if there is a cache-hit, so - * it is absolutely critical for 100% coverage of the validator - * to have a unique key value for every unique dependency path - * that can occur in the system, to make a unique hash value - * as likely as possible - hence the 64-bit width. - * - * The task struct holds the current hash value (initialized - * with zero), here we store the previous hash value: - */ - u64 prev_chain_key; - unsigned long acquire_ip; - struct lockdep_map *instance; - struct lockdep_map *nest_lock; -#ifdef CONFIG_LOCK_STAT - u64 waittime_stamp; - u64 holdtime_stamp; -#endif - /* - * class_idx is zero-indexed; it points to the element in - * lock_classes this held lock instance belongs to. class_idx is in - * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. - */ - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; - /* - * The lock-stack is unified in that the lock chains of interrupt - * contexts nest ontop of process context chains, but we 'separate' - * the hashes by starting with 0 if we cross into an interrupt - * context, and we also keep do not add cross-context lock - * dependencies - the lock usage graph walking covers that area - * anyway, and we'd just unnecessarily increase the number of - * dependencies otherwise. [Note: hardirq and softirq contexts - * are separated from each other too.] 
- * - * The following field is used to detect when we cross into an - * interrupt context: - */ - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ - unsigned int trylock:1; /* 16 bits */ - - unsigned int read:2; /* see lock_acquire() comment */ - unsigned int check:1; /* see lock_acquire() comment */ - unsigned int hardirqs_off:1; - unsigned int sync:1; - unsigned int references:11; /* 32 bits */ - unsigned int pin_count; -}; - /* * Initialization, self-test and debugging-output methods: */ diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 2ebc323d345a..9c533c8d701e 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -198,6 +198,63 @@ struct lockdep_map { struct pin_cookie { unsigned int val; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) +#define INITIAL_CHAIN_KEY -1 + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. + * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. + * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + unsigned long acquire_ip; + struct lockdep_map *instance; + struct lockdep_map *nest_lock; +#ifdef CONFIG_LOCK_STAT + u64 waittime_stamp; + u64 holdtime_stamp; +#endif + /* + * class_idx is zero-indexed; it points to the element in + * lock_classes this held lock instance belongs to. class_idx is in + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. 
+ */ + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest on top of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] + * + * The following field is used to detect when we cross into an + * interrupt context: + */ + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; /* 16 bits */ + + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:1; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; + unsigned int sync:1; + unsigned int references:11; /* 32 bits */ + unsigned int pin_count; +}; + #else /* !CONFIG_LOCKDEP */ /*
held_lock is embedded in task_struct, and we don't want sched.h pulling in all of lockdep.h Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev> --- include/linux/lockdep.h | 57 ----------------------------------- include/linux/lockdep_types.h | 57 +++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-)