Message ID | 20240611222905.34695-3-kuniyu@amazon.com
---|---
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | af_unix: Remove spin_lock_nested() and convert to lock_cmp_fn.
On Tue, Jun 11, 2024 at 03:28:56PM GMT, Kuniyuki Iwashima wrote:
> unix_dgram_connect() and unix_dgram_{send,recv}msg() lock the socket
> and peer in ascending order of the socket address.
>
> Let's define the order as unix_state_lock_cmp_fn() instead of using
> unix_state_lock_nested().
>
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>

Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>

> ---
> net/unix/af_unix.c | 16 +++++++++++++++-
> 1 file changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
> index 22bb941f174e..c09bf2b03582 100644
> --- a/net/unix/af_unix.c
> +++ b/net/unix/af_unix.c
> @@ -134,6 +134,18 @@ static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
>  {
>  	return cmp_ptr(a, b);
>  }
> +
> +static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
> +				  const struct lockdep_map *_b)
> +{
> +	const struct unix_sock *a, *b;
> +
> +	a = container_of(_a, struct unix_sock, lock.dep_map);
> +	b = container_of(_b, struct unix_sock, lock.dep_map);
> +
> +	/* unix_state_double_lock(): ascending address order. */
> +	return cmp_ptr(a, b);
> +}
>  #endif
>
>  static unsigned int unix_unbound_hash(struct sock *sk)
> @@ -987,6 +999,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
>  	u->path.dentry = NULL;
>  	u->path.mnt = NULL;
>  	spin_lock_init(&u->lock);
> +	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
>  	mutex_init(&u->iolock); /* single task reading lock */
>  	mutex_init(&u->bindlock); /* single task binding lock */
>  	init_waitqueue_head(&u->peer_wait);
> @@ -1335,11 +1348,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
>  		unix_state_lock(sk1);
>  		return;
>  	}
> +
>  	if (sk1 > sk2)
>  		swap(sk1, sk2);
>
>  	unix_state_lock(sk1);
> -	unix_state_lock_nested(sk2, U_LOCK_SECOND);
> +	unix_state_lock(sk2);
>  }
>
>  static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
> --
> 2.30.2
>
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 22bb941f174e..c09bf2b03582 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -134,6 +134,18 @@ static int unix_table_lock_cmp_fn(const struct lockdep_map *a,
 {
 	return cmp_ptr(a, b);
 }
+
+static int unix_state_lock_cmp_fn(const struct lockdep_map *_a,
+				  const struct lockdep_map *_b)
+{
+	const struct unix_sock *a, *b;
+
+	a = container_of(_a, struct unix_sock, lock.dep_map);
+	b = container_of(_b, struct unix_sock, lock.dep_map);
+
+	/* unix_state_double_lock(): ascending address order. */
+	return cmp_ptr(a, b);
+}
 #endif
 
 static unsigned int unix_unbound_hash(struct sock *sk)
@@ -987,6 +999,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
 	u->path.dentry = NULL;
 	u->path.mnt = NULL;
 	spin_lock_init(&u->lock);
+	lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL);
 	mutex_init(&u->iolock); /* single task reading lock */
 	mutex_init(&u->bindlock); /* single task binding lock */
 	init_waitqueue_head(&u->peer_wait);
@@ -1335,11 +1348,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
 		unix_state_lock(sk1);
 		return;
 	}
+
 	if (sk1 > sk2)
 		swap(sk1, sk2);
 
 	unix_state_lock(sk1);
-	unix_state_lock_nested(sk2, U_LOCK_SECOND);
+	unix_state_lock(sk2);
 }
 
 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
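The cmp_ptr() helper called by both lock_cmp_fn callbacks above is not part of this hunk; it is defined elsewhere in the tree. As a rough, non-authoritative sketch of the semantics the callbacks rely on (the name and definition below are illustrative assumptions, not the kernel's code):

```c
/*
 * Illustration only -- not the kernel's cmp_ptr() definition.
 * The lock_cmp_fn callbacks above are assumed to get back a
 * memcmp()-style three-way result: negative when a should be locked
 * before b, zero for the same lock, positive otherwise.
 */
static inline int cmp_ptr_sketch(const void *a, const void *b)
{
	return (a > b) - (a < b);
}
```

With such an ordering registered via lock_set_cmp_fn(), lockdep can check, whenever two unix_state locks are held together, that they were taken in ascending address order, which is the convention unix_state_double_lock() enforces, so the U_LOCK_SECOND subclass annotation is no longer needed.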
unix_dgram_connect() and unix_dgram_{send,recv}msg() lock the socket
and peer in ascending order of the socket address.

Let's define the order as unix_state_lock_cmp_fn() instead of using
unix_state_lock_nested().

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 net/unix/af_unix.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
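For readers outside the kernel tree, here is a minimal, self-contained userspace sketch of the "always lock the lower address first" discipline that unix_state_double_lock() follows and that the new cmp_fn describes to lockdep. This is not kernel code; struct obj and double_lock() are made-up names for illustration:

```c
#include <pthread.h>
#include <stdio.h>

/* Two objects, each with its own lock, standing in for a pair of unix sockets. */
struct obj {
	pthread_mutex_t lock;
	int val;
};

/*
 * Always acquire the lock at the lower address first, mirroring the
 * ascending-address rule that unix_state_double_lock() enforces with swap().
 */
static void double_lock(struct obj *a, struct obj *b)
{
	if (a > b) {
		struct obj *tmp = a;

		a = b;
		b = tmp;
	}

	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	/* Either argument order is safe: the helper normalises it internally. */
	double_lock(&x, &y);
	x.val += y.val;
	double_unlock(&x, &y);

	double_lock(&y, &x);
	y.val += x.val;
	double_unlock(&y, &x);

	printf("x=%d y=%d\n", x.val, y.val);
	return 0;
}
```

Because every path that takes both locks normalises the order the same way, an ABBA deadlock between two such pairs cannot occur, and a per-class comparison function is enough for lockdep to check the convention without spin_lock_nested() subclass annotations.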