@@ -576,31 +576,43 @@ static struct io_zc_rx_ifq *io_zc_rx_ifq_skb(struct sk_buff *skb)
return NULL;
}
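 
+/*
+ * Reserve the next free CQE on the ifq's CQ ring; returns NULL when the
+ * ring is full.
+ */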
+static inline struct io_uring_rbuf_cqe *io_zc_get_rbuf_cqe(struct io_zc_rx_ifq *ifq)
+{
+ struct io_uring_rbuf_cqe *cqe;
+ unsigned int cq_idx, queued, free, entries;
+ unsigned int mask = ifq->cq_entries - 1;
+
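+ /* cq_entries is a power of two, so masking maps the cached tail to a slot */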
+ cq_idx = ifq->cached_cq_tail & mask;
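+ /* read the CQ head written by the consumer side after this barrier */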
+ smp_rmb();
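+ /* count queued CQEs to find free space; only hand out slots up to the array end */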
+ queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
+ free = ifq->cq_entries - queued;
+ entries = min(free, ifq->cq_entries - cq_idx);
+ if (!entries)
+ return NULL;
+
+ cqe = &ifq->cqes[cq_idx];
+ ifq->cached_cq_tail++;
+ return cqe;
+}
+
static int zc_rx_recv_frag(struct io_zc_rx_ifq *ifq, const skb_frag_t *frag,
int off, int len, bool zc_skb)
{
struct io_uring_rbuf_cqe *cqe;
- unsigned int cq_idx, queued, free, entries;
struct page *page;
- unsigned int mask;
u32 pgid;

page = skb_frag_page(frag);
off += skb_frag_off(frag);
if (likely(zc_skb && is_zc_rx_page(page))) {
- mask = ifq->cq_entries - 1;
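+ /* reserve a CQE for this frag; -ENOBUFS if the CQ ring is full */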
+ cqe = io_zc_get_rbuf_cqe(ifq);
+ if (!cqe)
+ return -ENOBUFS;
+
pgid = page_private(page) & 0xffffffff;
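+ /* bump the buffer's user refcount before posting it in a CQE */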
io_zc_rx_get_buf_uref(ifq->pool, pgid);
- cq_idx = ifq->cached_cq_tail & mask;
- smp_rmb();
- queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
- free = ifq->cq_entries - queued;
- entries = min(free, ifq->cq_entries - cq_idx);
- if (!entries)
- return -ENOBUFS;
- cqe = &ifq->cqes[cq_idx];
- ifq->cached_cq_tail++;
+
cqe->region = 0;
cqe->off = pgid * PAGE_SIZE + off;
cqe->len = len;