@@ -1222,27 +1222,35 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
 
 static void sk_psock_verdict_data_ready(struct sock *sk)
 {
-	struct socket *sock = sk->sk_socket;
+	struct socket *sock;
 	const struct proto_ops *ops;
 	int copied;
 
 	trace_sk_data_ready(sk);
 
+	/* We need RCU to prevent the sk_socket from being released.
+	 * Especially for Unix sockets, we are currently in the process
+	 * context and do not have RCU protection.
+	 */
+	rcu_read_lock();
+	sock = sk->sk_socket;
 	if (unlikely(!sock))
-		return;
+		goto unlock;
+
 	ops = READ_ONCE(sock->ops);
 	if (!ops || !ops->read_skb)
-		return;
+		goto unlock;
+
 	copied = ops->read_skb(sk, sk_psock_verdict_recv);
 	if (copied >= 0) {
 		struct sk_psock *psock;
 
-		rcu_read_lock();
 		psock = sk_psock(sk);
 		if (psock)
 			sk_psock_data_ready(sk, psock);
-		rcu_read_unlock();
 	}
+unlock:
+	rcu_read_unlock();
 }
 
 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
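
To make the lifetime rule above concrete, here is a minimal userspace sketch
of the same pattern using liburcu (the userspace RCU library; the kernel API
differs in detail, and liburcu's default flavor is assumed). struct
socket_stub, g_sock, and the data_ready() thread are hypothetical stand-ins
for struct socket, sk->sk_socket, and the data-ready/close paths; this is an
illustration of the technique, not the kernel code itself:

'''
/* rcu_sketch.c — build (assumes liburcu is installed):
 *   gcc rcu_sketch.c -o rcu_sketch -lurcu -lpthread
 */
#include <urcu.h>		/* userspace RCU: rcu_read_lock() etc. */
#include <pthread.h>
#include <stdlib.h>

struct socket_stub {		/* hypothetical stand-in for struct socket */
	int dummy_state;
};

/* Hypothetical stand-in for sk->sk_socket. */
static struct socket_stub *g_sock;

/* Reader: models the patched sk_psock_verdict_data_ready(). */
static void *data_ready(void *arg)
{
	rcu_register_thread();		/* liburcu readers must register */
	for (int i = 0; i < 1000000; i++) {
		rcu_read_lock();	/* pin the object: free is deferred */
		struct socket_stub *sock = rcu_dereference(g_sock);
		if (sock)
			(void)sock->dummy_state;	/* safe dereference */
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	rcu_register_thread();
	g_sock = calloc(1, sizeof(*g_sock));
	pthread_create(&t, NULL, data_ready, NULL);

	/* Closer: models the socket release path. Unpublish the
	 * pointer, wait out all read-side sections, then free.
	 */
	struct socket_stub *old = g_sock;
	rcu_assign_pointer(g_sock, NULL);
	synchronize_rcu();		/* no reader can still hold old */
	free(old);

	pthread_join(t, NULL);
	rcu_unregister_thread();
	return 0;
}
'''

Without the rcu_read_lock()/rcu_read_unlock() pair in the reader, the closer
could free the object between the NULL check and the dereference; with it,
synchronize_rcu() cannot return until the reader has left its critical section.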
Use RCU lock to protect sk_socket, preventing concurrent close and
release by another thread.

Because TCP/UDP are already within a relatively large RCU critical
section:

'''
ip_local_deliver_finish
  rcu_read_lock
  ip_protocol_deliver_rcu
    tcp_rcv/udp_rcv
  rcu_read_unlock
'''

adding rcu_read_{un}lock() at the entry and exit of sk_data_ready
will not increase performance overhead.

Reported-by: syzbot+dd90a702f518e0eac072@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/bpf/6734c033.050a0220.2a2fcc.0015.GAE@google.com/
Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
---
 net/core/skmsg.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
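
The "no performance overhead" argument rests on RCU read-side critical
sections being cheap and freely nestable: on the TCP/UDP receive path the
new lock simply nests inside the section already opened in
ip_local_deliver_finish(). A trivial liburcu illustration of that nesting
(same assumptions as the sketch above; the comments map the two pairs onto
the kernel call chain only by analogy):

'''
/* nesting_sketch.c — build: gcc nesting_sketch.c -o nesting_sketch -lurcu */
#include <urcu.h>

int main(void)
{
	rcu_register_thread();

	rcu_read_lock();	/* outer: models ip_local_deliver_finish() */
	rcu_read_lock();	/* inner: models the new lock in data_ready */
	/* ... protected work: deliver skb, run verdict program ... */
	rcu_read_unlock();	/* inner: still inside the outer section */
	rcu_read_unlock();	/* outer: read-side section ends here */

	rcu_unregister_thread();
	return 0;
}
'''

The inner lock/unlock pair is essentially a per-thread counter update, so
nesting it inside an existing section adds no meaningful cost.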