@@ -21,7 +21,6 @@ struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_BITS 8
extern unsigned int unix_tot_inflight;
-extern spinlock_t unix_table_locks[UNIX_HASH_SIZE];
struct unix_hashbucket {
spinlock_t lock;
@@ -118,8 +118,6 @@
#include "scm.h"
-spinlock_t unix_table_locks[UNIX_HASH_SIZE];
-EXPORT_SYMBOL_GPL(unix_table_locks);
static atomic_long_t unix_nr_socks;
/* SMP locking strategy:
@@ -166,9 +164,6 @@ static void unix_table_double_lock(struct net *net,
if (hash1 > hash2)
swap(hash1, hash2);
- spin_lock(&unix_table_locks[hash1]);
- spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING);
-
spin_lock(&net->unx.hash[hash1].lock);
spin_lock(&net->unx.hash[hash2].lock);
}
@@ -178,9 +173,6 @@ static void unix_table_double_unlock(struct net *net,
{
spin_unlock(&net->unx.hash[hash1].lock);
spin_unlock(&net->unx.hash[hash2].lock);
-
- spin_unlock(&unix_table_locks[hash1]);
- spin_unlock(&unix_table_locks[hash2]);
}
#ifdef CONFIG_SECURITY_NETWORK
@@ -324,20 +316,16 @@ static void __unix_set_addr_hash(struct net *net, struct sock *sk,
static void unix_remove_socket(struct net *net, struct sock *sk)
{
- spin_lock(&unix_table_locks[sk->sk_hash]);
spin_lock(&net->unx.hash[sk->sk_hash].lock);
__unix_remove_socket(sk);
spin_unlock(&net->unx.hash[sk->sk_hash].lock);
- spin_unlock(&unix_table_locks[sk->sk_hash]);
}
static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
- spin_lock(&unix_table_locks[sk->sk_hash]);
spin_lock(&net->unx.hash[sk->sk_hash].lock);
__unix_insert_socket(net, sk);
spin_unlock(&net->unx.hash[sk->sk_hash].lock);
- spin_unlock(&unix_table_locks[sk->sk_hash]);
}
static struct sock *__unix_find_socket_byname(struct net *net,
@@ -362,13 +350,11 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
{
struct sock *s;
- spin_lock(&unix_table_locks[hash]);
spin_lock(&net->unx.hash[hash].lock);
s = __unix_find_socket_byname(net, sunname, len, hash);
if (s)
sock_hold(s);
spin_unlock(&net->unx.hash[hash].lock);
- spin_unlock(&unix_table_locks[hash]);
return s;
}
@@ -377,7 +363,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
unsigned int hash = unix_bsd_hash(i);
struct sock *s;
- spin_lock(&unix_table_locks[hash]);
spin_lock(&net->unx.hash[hash].lock);
sk_for_each(s, &net->unx.hash[hash].head) {
struct dentry *dentry = unix_sk(s)->path.dentry;
@@ -385,12 +370,10 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
spin_unlock(&net->unx.hash[hash].lock);
- spin_unlock(&unix_table_locks[hash]);
return s;
}
}
spin_unlock(&net->unx.hash[hash].lock);
- spin_unlock(&unix_table_locks[hash]);
return NULL;
}
@@ -3253,7 +3236,6 @@ static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
struct sock *sk;
while (bucket < UNIX_HASH_SIZE) {
- spin_lock(&unix_table_locks[bucket]);
spin_lock(&net->unx.hash[bucket].lock);
sk = unix_from_bucket(seq, pos);
@@ -3261,7 +3243,6 @@ static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
return sk;
spin_unlock(&net->unx.hash[bucket].lock);
- spin_unlock(&unix_table_locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
}
@@ -3279,7 +3260,6 @@ static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
return sk;
spin_unlock(&seq_file_net(seq)->unx.hash[bucket].lock);
- spin_unlock(&unix_table_locks[bucket]);
*pos = set_bucket_offset(++bucket, 1);
@@ -3308,10 +3288,8 @@ static void unix_seq_stop(struct seq_file *seq, void *v)
{
struct sock *sk = v;
- if (sk) {
+ if (sk)
spin_unlock(&seq_file_net(seq)->unx.hash[sk->sk_hash].lock);
- spin_unlock(&unix_table_locks[sk->sk_hash]);
- }
}
static int unix_seq_show(struct seq_file *seq, void *v)
@@ -3415,7 +3393,6 @@ static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
}
spin_unlock(&seq_file_net(seq)->unx.hash[start_sk->sk_hash].lock);
- spin_unlock(&unix_table_locks[start_sk->sk_hash]);
return expected;
}
@@ -3709,9 +3686,6 @@ static int __init af_unix_init(void)
spin_lock_init(&init_net.unx.hash[i].lock);
}
- for (i = 0; i < UNIX_HASH_SIZE; i++)
- spin_lock_init(&unix_table_locks[i]);
-
rc = proto_register(&unix_dgram_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
@@ -208,7 +208,6 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct sock *sk;
num = 0;
- spin_lock(&unix_table_locks[slot]);
spin_lock(&net->unx.hash[slot].lock);
sk_for_each(sk, &net->unx.hash[slot].head) {
if (num < s_num)
@@ -220,14 +219,12 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0) {
spin_unlock(&net->unx.hash[slot].lock);
- spin_unlock(&unix_table_locks[slot]);
goto done;
}
next:
num++;
}
spin_unlock(&net->unx.hash[slot].lock);
- spin_unlock(&unix_table_locks[slot]);
}
done:
cb->args[0] = slot;
@@ -242,18 +239,15 @@ static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
int i;
for (i = 0; i < UNIX_HASH_SIZE; i++) {
- spin_lock(&unix_table_locks[i]);
spin_lock(&net->unx.hash[i].lock);
sk_for_each(sk, &net->unx.hash[i].head) {
if (ino == sock_i_ino(sk)) {
sock_hold(sk);
spin_unlock(&net->unx.hash[i].lock);
- spin_unlock(&unix_table_locks[i]);
return sk;
}
}
spin_unlock(&net->unx.hash[i].lock);
- spin_unlock(&unix_table_locks[i]);
}
return NULL;
}
unix_table_locks was used to protect the global hash table, unix_socket_table. The previous commit removed that table, so let's clean up the now-unnecessary locks. Here is a test result on an EC2 c5.9xlarge instance, where 10 processes run concurrently in different netns and each binds 100,000 sockets. Without this series: 1m 38s. With this series: 11s. It is ~10x faster because the global hash table is now split across the 10 netns in this case. Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com> --- include/net/af_unix.h | 1 - net/unix/af_unix.c | 28 +--------------------------- net/unix/diag.c | 6 ------ 3 files changed, 1 insertion(+), 34 deletions(-)