
[v2,net-next,01/11] af_unix: Define locking order for unix_table_double_lock().

Message ID 20240611222905.34695-2-kuniyu@amazon.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series af_unix: Remove spin_lock_nested() and convert to lock_cmp_fn.

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 856 this patch: 856
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 854 this patch: 854
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 860 this patch: 860
netdev/checkpatch warning WARNING: line length of 88 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-06-14--06-00 (tests: 647)

Commit Message

Kuniyuki Iwashima June 11, 2024, 10:28 p.m. UTC
When created, an AF_UNIX socket is put into net->unx.table.buckets[],
and its hash is stored in sk->sk_hash.

  * unbound socket  : 0 <= sk_hash <= UNIX_HASH_MOD

When bind() is called, the socket could be moved to another bucket.

  * pathname socket : 0 <= sk_hash <= UNIX_HASH_MOD
  * abstract socket : UNIX_HASH_MOD + 1 <= sk_hash <= UNIX_HASH_MOD * 2 + 1
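
For illustration only (not part of the patch), here is a minimal user-space
sketch of how these ranges partition the table.  UNIX_HASH_MOD is assumed to
be 255 here, and bucket_kind() is a hypothetical helper, not a kernel
function:

  #include <assert.h>
  #include <stdio.h>

  /* Assumed values, mirroring the ranges listed above. */
  #define UNIX_HASH_MOD   255
  #define UNIX_HASH_SIZE  (2 * (UNIX_HASH_MOD + 1))  /* length of buckets[]/locks[] */

  /* Hypothetical helper: which half of the table a given sk_hash lands in. */
  static const char *bucket_kind(unsigned int sk_hash)
  {
          assert(sk_hash < UNIX_HASH_SIZE);
          return sk_hash <= UNIX_HASH_MOD ? "unbound or pathname" : "abstract";
  }

  int main(void)
  {
          printf("%d -> %s\n", 0, bucket_kind(0));
          printf("%d -> %s\n", UNIX_HASH_MOD, bucket_kind(UNIX_HASH_MOD));
          printf("%d -> %s\n", UNIX_HASH_MOD + 1, bucket_kind(UNIX_HASH_MOD + 1));
          printf("%d -> %s\n", 2 * UNIX_HASH_MOD + 1, bucket_kind(2 * UNIX_HASH_MOD + 1));
          return 0;
  }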

Then, we call unix_table_double_lock(), which locks either a single
bucket or two buckets.

Let's define the locking order via unix_table_lock_cmp_fn() instead of
using spin_lock_nested().

The locking is always done in ascending order of sk->sk_hash, which
is the index into the buckets/locks arrays allocated by kvmalloc_array().

  sk_hash_A < sk_hash_B
  <=> &locks[sk_hash_A].dep_map < &locks[sk_hash_B].dep_map

So, the ordering of two sk->sk_hash values can be derived from the
addresses of their dep_map in the locks array.
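
Here is a self-contained sketch (user-space C, with stand-in types rather
than the real kernel ones) of why that holds: locks[] is one contiguous
array, so comparing the embedded dep_map addresses, as the cmp_fn in this
patch does via cmp_ptr(), reproduces the order of the indices themselves:

  #include <assert.h>
  #include <stdio.h>

  /* Stand-ins for the kernel types, only to show the address argument. */
  struct lockdep_map { int dummy; };
  struct fake_spinlock { int owner; struct lockdep_map dep_map; };

  /* Same macro as in the patch: -1, 0, or 1 depending on pointer order. */
  #define cmp_ptr(l, r)   (((l) > (r)) - ((l) < (r)))

  int main(void)
  {
          struct fake_spinlock locks[512];
          unsigned int a = 10, b = 300;   /* two sk_hash values with a < b */

          assert(cmp_ptr(&locks[a].dep_map, &locks[b].dep_map) < 0);
          assert(cmp_ptr(&locks[b].dep_map, &locks[a].dep_map) > 0);
          assert(cmp_ptr(&locks[a].dep_map, &locks[a].dep_map) == 0);

          printf("index order matches dep_map address order\n");
          return 0;
  }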

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 net/unix/af_unix.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

Comments

Kent Overstreet June 11, 2024, 11:15 p.m. UTC | #1
On Tue, Jun 11, 2024 at 03:28:55PM GMT, Kuniyuki Iwashima wrote:
> When created, an AF_UNIX socket is put into net->unx.table.buckets[],
> and its hash is stored in sk->sk_hash.
> 
>   * unbound socket  : 0 <= sk_hash <= UNIX_HASH_MOD
> 
> When bind() is called, the socket could be moved to another bucket.
> 
>   * pathname socket : 0 <= sk_hash <= UNIX_HASH_MOD
>   * abstract socket : UNIX_HASH_MOD + 1 <= sk_hash <= UNIX_HASH_MOD * 2 + 1
> 
> Then, we call unix_table_double_lock(), which locks either a single
> bucket or two buckets.
> 
> Let's define the locking order via unix_table_lock_cmp_fn() instead of
> using spin_lock_nested().
> 
> The locking is always done in ascending order of sk->sk_hash, which
> is the index into the buckets/locks arrays allocated by kvmalloc_array().
> 
>   sk_hash_A < sk_hash_B
>   <=> &locks[sk_hash_A].dep_map < &locks[sk_hash_B].dep_map
> 
> So, the ordering of two sk->sk_hash values can be derived from the
> addresses of their dep_map in the locks array.
> 
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>

Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>

> ---
>  net/unix/af_unix.c | 12 +++++++++++-
>  1 file changed, 11 insertions(+), 1 deletion(-)
> 
> diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
> index 3821f8945b1e..22bb941f174e 100644
> --- a/net/unix/af_unix.c
> +++ b/net/unix/af_unix.c
> @@ -126,6 +126,15 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
>   *    hash table is protected with spinlock.
>   *    each socket state is protected by separate spinlock.
>   */
> +#ifdef CONFIG_PROVE_LOCKING
> +#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
> +
> +static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
> +				  const struct lockdep_map *b)
> +{
> +	return cmp_ptr(a, b);
> +}
> +#endif
>  
>  static unsigned int unix_unbound_hash(struct sock *sk)
>  {
> @@ -168,7 +177,7 @@ static void unix_table_double_lock(struct net *net,
>  		swap(hash1, hash2);
>  
>  	spin_lock(&net->unx.table.locks[hash1]);
> -	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
> +	spin_lock(&net->unx.table.locks[hash2]);
>  }
>  
>  static void unix_table_double_unlock(struct net *net,
> @@ -3578,6 +3587,7 @@ static int __net_init unix_net_init(struct net *net)
>  
>  	for (i = 0; i < UNIX_HASH_SIZE; i++) {
>  		spin_lock_init(&net->unx.table.locks[i]);
> +		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
>  		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
>  	}
>  
> -- 
> 2.30.2
>

Patch

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3821f8945b1e..22bb941f174e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -126,6 +126,15 @@  static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
  *    hash table is protected with spinlock.
  *    each socket state is protected by separate spinlock.
  */
+#ifdef CONFIG_PROVE_LOCKING
+#define cmp_ptr(l, r)	(((l) > (r)) - ((l) < (r)))
+
+static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
+				  const struct lockdep_map *b)
+{
+	return cmp_ptr(a, b);
+}
+#endif
 
 static unsigned int unix_unbound_hash(struct sock *sk)
 {
@@ -168,7 +177,7 @@  static void unix_table_double_lock(struct net *net,
 		swap(hash1, hash2);
 
 	spin_lock(&net->unx.table.locks[hash1]);
-	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
+	spin_lock(&net->unx.table.locks[hash2]);
 }
 
 static void unix_table_double_unlock(struct net *net,
@@ -3578,6 +3587,7 @@  static int __net_init unix_net_init(struct net *net)
 
 	for (i = 0; i < UNIX_HASH_SIZE; i++) {
 		spin_lock_init(&net->unx.table.locks[i]);
+		lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL);
 		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
 	}