diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -949,6 +949,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
struct io_sendzc *zc = io_kiocb_to_cmd(req);
struct io_notif_slot *notif_slot;
struct io_kiocb *notif;
+ struct ubuf_info *ubuf;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
@@ -1007,10 +1008,15 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = msg_flags;
- msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
msg.sg_from_iter = io_sg_from_iter;
+ msg.msg_ubuf = ubuf = &io_notif_to_data(notif)->uarg;
+ ubuf->flags |= UARGFL_GIFT_REF;
ret = sock_sendmsg(sock, &msg);
+ /* check if the send consumed an additional ref */
+ if (likely(!(ubuf->flags & UARGFL_GIFT_REF)))
+ io_notif_consume_ref(notif);
+
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN;
diff --git a/io_uring/notif.c b/io_uring/notif.c
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -68,15 +68,17 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
nd->uarg.skb_flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
nd->uarg.flags = UARGFL_CALLER_PINNED;
nd->uarg.callback = io_uring_tx_zerocopy_callback;
+ nd->cached_refs = IO_NOTIF_REF_CACHE_NR;
/* master ref owned by io_notif_slot, will be dropped on flush */
- refcount_set(&nd->uarg.refcnt, 1);
+ refcount_set(&nd->uarg.refcnt, IO_NOTIF_REF_CACHE_NR + 1);
return notif;
}
static inline bool io_notif_drop_refs(struct io_notif_data *nd)
{
- int refs = 1;
+ int refs = nd->cached_refs + 1;
+ nd->cached_refs = 0;
return refcount_sub_and_test(refs, &nd->uarg.refcnt);
}
diff --git a/io_uring/notif.h b/io_uring/notif.h
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -9,11 +9,14 @@
#define IO_NOTIF_SPLICE_BATCH 32
#define IORING_MAX_NOTIF_SLOTS (1U << 10)
+#define IO_NOTIF_REF_CACHE_NR 64
struct io_notif_data {
struct file *file;
- struct ubuf_info uarg;
unsigned long account_pages;
+ /* extra uarg->refcnt refs */
+ int cached_refs;
+ struct ubuf_info uarg;
};
struct io_notif_slot {
@@ -88,3 +91,20 @@ static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
}
return 0;
}
+
+static inline void io_notif_consume_ref(struct io_kiocb *notif)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_notif_data *nd = io_notif_to_data(notif);
+
+ nd->cached_refs--;
+
+ /*
+ * Sends are issued without checking nd->cached_refs first, so we must
+ * always keep at least one ref cached
+ */
+ if (unlikely(!nd->cached_refs)) {
+ refcount_add(IO_NOTIF_REF_CACHE_NR, &nd->uarg.refcnt);
+ nd->cached_refs += IO_NOTIF_REF_CACHE_NR;
+ }
+}
Cache some active notifier references on the io_uring side and grab them in
batches, so the amortised cost is low. Then these references can be given
away to the network layer using UARGFL_GIFT_REF.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/net.c   |  8 +++++++-
 io_uring/notif.c |  6 ++++--
 io_uring/notif.h | 22 +++++++++++++++++++++-
 3 files changed, 32 insertions(+), 4 deletions(-)
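
For readers unfamiliar with the gifting scheme, here is a rough userspace
model of what the patch does. It is illustrative only: notif_model,
gift_ref, REF_CACHE_NR and the helpers below are stand-ins for the real
ubuf_info/UARGFL_GIFT_REF machinery, not io_uring or networking APIs.

/*
 * Userspace sketch of the cached + gifted reference scheme.
 * All names here are made up for the example.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_CACHE_NR 64

struct notif_model {
	atomic_int refcnt;	/* models nd->uarg.refcnt */
	int cached_refs;	/* refs already in refcnt but not handed out yet */
	bool gift_ref;		/* models the UARGFL_GIFT_REF flag */
};

static void notif_init(struct notif_model *n)
{
	/* one master ref plus a whole batch, taken with a single store */
	atomic_init(&n->refcnt, REF_CACHE_NR + 1);
	n->cached_refs = REF_CACHE_NR;
	n->gift_ref = false;
}

/* "network" side: take a ref, preferring the gifted one over an atomic inc */
static void net_take_ref(struct notif_model *n)
{
	if (n->gift_ref)
		n->gift_ref = false;		/* consume the gift, no atomic */
	else
		atomic_fetch_add(&n->refcnt, 1);
}

/* sender side: a gifted ref was taken, charge it against the cache */
static void notif_consume_ref(struct notif_model *n)
{
	if (--n->cached_refs == 0) {
		/* refill in one go so the next send always has a ref cached */
		atomic_fetch_add(&n->refcnt, REF_CACHE_NR);
		n->cached_refs = REF_CACHE_NR;
	}
}

static void do_send(struct notif_model *n, bool net_wants_ref)
{
	n->gift_ref = true;			/* offer one cached ref */
	if (net_wants_ref)
		net_take_ref(n);		/* models the sock_sendmsg() path */
	if (!n->gift_ref)
		notif_consume_ref(n);		/* gift was consumed */
	else
		n->gift_ref = false;		/* gift not taken, keep the ref */
}

int main(void)
{
	struct notif_model n;

	notif_init(&n);
	for (int i = 0; i < 10 * REF_CACHE_NR; i++)
		do_send(&n, true);
	printf("refcnt=%d cached_refs=%d\n",
	       atomic_load(&n.refcnt), n.cached_refs);
	return 0;
}

The point of the model: with the cache, the sender touches the atomic
refcount roughly once per REF_CACHE_NR sends instead of once per send,
because each consumed gift was pre-paid by a batched refcount_add().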