[14/20] io_uring/zcrx: introduce io_zc_get_rbuf_cqe

Message ID 20231107214045.2172393-15-dw@davidwei.uk (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Series Zero copy Rx using io_uring

Checks

Context                Check    Description
netdev/tree_selection  success  Guessing tree name failed - patch did not apply, async

Commit Message

David Wei Nov. 7, 2023, 9:40 p.m. UTC
From: Pavel Begunkov <asml.silence@gmail.com>

Add a simple helper for grabbing a new rbuf entry. It greatly helps
zc_rx_recv_frag()'s readability and will be reused later.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
 io_uring/zc_rx.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)
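
For context outside the diff below, here is a minimal standalone sketch of
the free-space check the new helper performs, assuming a power-of-two
completion ring. All names here (rbuf_ring, rbuf_ring_get_cqe, and the
struct fields) are hypothetical and not part of the patch:

#include <stddef.h>
#include <stdint.h>

struct rbuf_cqe {
	uint64_t off;
	uint32_t len;
	uint32_t region;
};

struct rbuf_ring {
	struct rbuf_cqe *cqes;
	unsigned int entries;     /* power of two */
	unsigned int head;        /* consumer head (shared in the real code) */
	unsigned int cached_tail; /* producer-private tail */
};

static struct rbuf_cqe *rbuf_ring_get_cqe(struct rbuf_ring *r)
{
	unsigned int mask = r->entries - 1;
	unsigned int idx = r->cached_tail & mask;
	/* tail - head gives the queued count; unsigned math handles wrap */
	unsigned int queued = r->cached_tail - r->head;
	unsigned int free = r->entries - queued;

	/*
	 * The patch additionally clamps to (entries - idx), which only
	 * matters when reserving several contiguous slots at once; for a
	 * single entry the free check alone suffices.
	 */
	if (!free)
		return NULL; /* ring full; the caller maps this to -ENOBUFS */

	r->cached_tail++;
	return &r->cqes[idx];
}

In the real helper the queued count comes from io_zc_rx_cqring_entries()
after an smp_rmb(), so the producer compares against a sufficiently fresh
consumer head before handing out a slot.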

Patch

diff --git a/io_uring/zc_rx.c b/io_uring/zc_rx.c
index 038692d3265e..c1502ec3e629 100644
--- a/io_uring/zc_rx.c
+++ b/io_uring/zc_rx.c
@@ -576,31 +576,43 @@  static struct io_zc_rx_ifq *io_zc_rx_ifq_skb(struct sk_buff *skb)
 	return NULL;
 }
 
+static inline struct io_uring_rbuf_cqe *io_zc_get_rbuf_cqe(struct io_zc_rx_ifq *ifq)
+{
+	struct io_uring_rbuf_cqe *cqe;
+	unsigned int cq_idx, queued, free, entries;
+	unsigned int mask = ifq->cq_entries - 1;
+
+	cq_idx = ifq->cached_cq_tail & mask;
+	smp_rmb();
+	queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
+	free = ifq->cq_entries - queued;
+	entries = min(free, ifq->cq_entries - cq_idx);
+	if (!entries)
+		return NULL;
+
+	cqe = &ifq->cqes[cq_idx];
+	ifq->cached_cq_tail++;
+	return cqe;
+}
+
 static int zc_rx_recv_frag(struct io_zc_rx_ifq *ifq, const skb_frag_t *frag,
 			   int off, int len, bool zc_skb)
 {
 	struct io_uring_rbuf_cqe *cqe;
-	unsigned int cq_idx, queued, free, entries;
 	struct page *page;
-	unsigned int mask;
 	u32 pgid;
 
 	page = skb_frag_page(frag);
 	off += skb_frag_off(frag);
 
 	if (likely(zc_skb && is_zc_rx_page(page))) {
-		mask = ifq->cq_entries - 1;
+		cqe = io_zc_get_rbuf_cqe(ifq);
+		if (!cqe)
+			return -ENOBUFS;
+
 		pgid = page_private(page) & 0xffffffff;
 		io_zc_rx_get_buf_uref(ifq->pool, pgid);
-		cq_idx = ifq->cached_cq_tail & mask;
-		smp_rmb();
-		queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
-		free = ifq->cq_entries - queued;
-		entries = min(free, ifq->cq_entries - cq_idx);
-		if (!entries)
-			return -ENOBUFS;
-		cqe = &ifq->cqes[cq_idx];
-		ifq->cached_cq_tail++;
+
 		cqe->region = 0;
 		cqe->off = pgid * PAGE_SIZE + off;
 		cqe->len = len;