@@ -803,9 +803,13 @@ static inline bool __udp_wfree(struct sk_buff *skb)
bool free;
free = refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc);
- /* a full barrier is required before waitqueue_active() */
+ /* a full barrier is required before waitqueue_active() and the
+ * SOCK_NOSPACE test below.
+ */
smp_mb__after_atomic();
+ if (sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ goto out;
if (!sock_writeable(sk))
goto out;
@@ -2925,8 +2929,19 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
/* psock ingress_msg queue should not contain any bad checksum frames */
if (sk_is_readable(sk))
mask |= EPOLLIN | EPOLLRDNORM;
- return mask;
+ if (!sock_writeable(sk)) {
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ /* Order with the wspace read so either we observe it
+ * writeable or udp_sock_wfree() would find SOCK_NOSPACE and
+ * wake us up.
+ */
+ smp_mb__after_atomic();
+
+ if (sock_writeable(sk))
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+ }
+ return mask;
}
EXPORT_SYMBOL(udp_poll);
Often the write queue is never filled to capacity, yet every sent skb still tries to wake the pollers via sock_wfree(), unnecessarily. That holds even when there are no write/POLLOUT pollers at all, as the socket's waitqueue is shared between readers and writers. Optimise it with SOCK_NOSPACE, which avoids the wakeup unless there are waiters that were actually starved of space.

With a dummy-device io_uring benchmark pushing as much as it can, I've got +5% to CPU-bound throughput (2268 Krps -> 2380 Krps). Profiles showed a ~3-4% reduction in the total CPU share of the destructors as a result of the change.

As noted in the previous patch, udp_wfree was introduced separately rather than being based on sock_wfree(), because SOCK_NOSPACE requires support from the poll callback, and there are a bunch of custom poll implementations in the tree.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 net/ipv4/udp.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
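
For reference, below is a hypothetical userspace sketch (not part of the patch) of the one case the SOCK_NOSPACE path still has to serve: a poller that actually observed the send queue full and is waiting for EPOLLOUT. The destination address, port and SO_SNDBUF value are arbitrary, and whether the queue really fills before send() returns EAGAIN depends on how quickly the underlying device frees its skbs (the benchmark above used a dummy netdevice for that reason), so treat it as an illustration rather than a reliable reproducer.

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Arbitrary sink address, for illustration only. */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct epoll_event ev = { .events = EPOLLOUT };
	char payload[1400] = {0};
	int sndbuf = 4096;
	int fd, epfd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	fcntl(fd, F_SETFL, O_NONBLOCK);
	/* A small send buffer makes it easier to run out of write space. */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* Fill the send queue until the socket stops being writeable. */
	while (send(fd, payload, sizeof(payload), 0) >= 0)
		;
	if (errno != EAGAIN && errno != EWOULDBLOCK)
		perror("send");

	epfd = epoll_create1(0);
	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);

	/*
	 * udp_poll() sees the socket unwriteable, sets SOCK_NOSPACE and
	 * re-checks write space; once the queued skbs are freed, the wfree
	 * path finds SOCK_NOSPACE and wakes this waiter with EPOLLOUT.
	 */
	if (epoll_wait(epfd, &ev, 1, 1000) == 1 && (ev.events & EPOLLOUT))
		printf("send queue drained, socket writeable again\n");

	close(epfd);
	close(fd);
	return 0;
}

The ordering in the patch is what makes this safe: udp_poll() sets SOCK_NOSPACE and then re-reads the write space, while the wfree side drops sk_wmem_alloc and then tests SOCK_NOSPACE, so the two full barriers guarantee at least one side observes the other and the waiter is never left hanging.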