[11/20] net: add data pool

Message ID 20231107214045.2172393-12-dw@davidwei.uk (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Series Zero copy Rx using io_uring

Checks

Context: netdev/tree_selection
Check: success
Description: Guessing tree name failed - patch did not apply, async

Commit Message

David Wei Nov. 7, 2023, 9:40 p.m. UTC
Add a struct data_pool that holds both a page_pool and an ifq (by
extension, a ZC pool).

Each hardware Rx queue configured for ZC will have one data_pool, set in
its struct netdev_rx_queue. Payload hardware Rx queues are filled from
the ZC pool, while header Rx queues are filled from the page_pool as
normal.

Co-developed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
 include/net/data_pool.h | 74 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 include/net/data_pool.h

Patch

diff --git a/include/net/data_pool.h b/include/net/data_pool.h
new file mode 100644
index 000000000000..bf2dff23724a
--- /dev/null
+++ b/include/net/data_pool.h
@@ -0,0 +1,74 @@ 
+#ifndef _DATA_POOL_H
+#define _DATA_POOL_H
+
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/mm_types.h>
+#include <linux/netdevice.h>
+#include <net/page_pool/helpers.h>
+
+struct data_pool {
+	struct page_pool	*page_pool;
+	struct io_zc_rx_ifq	*zc_ifq;
+	struct ubuf_info	*zc_uarg;
+};
+
+static inline struct page *data_pool_alloc_page(struct data_pool *dp)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_get_buf(dp->zc_ifq);
+		if (!buf)
+			return NULL;
+		return buf->page;
+	} else {
+		return page_pool_dev_alloc_pages(dp->page_pool);
+	}
+}
+
+static inline void data_pool_fragment_page(struct data_pool *dp,
+					   struct page *page,
+					   unsigned long bias)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		atomic_set(&buf->refcount, bias);
+	} else {
+		page_pool_fragment_page(page, bias);
+	}
+}
+
+static inline void data_pool_put_page(struct data_pool *dp, struct page *page)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		if (!buf)
+			page_pool_recycle_direct(dp->page_pool, page);
+		else
+			io_zc_rx_put_buf(dp->zc_ifq, buf);
+	} else {
+		WARN_ON_ONCE(page->pp_magic != PP_SIGNATURE);
+
+		page_pool_recycle_direct(dp->page_pool, page);
+	}
+}
+
+static inline dma_addr_t data_pool_get_dma_addr(struct data_pool *dp,
+						struct page *page)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		return io_zc_rx_buf_dma(buf);
+	} else {
+		return page_pool_get_dma_addr(page);
+	}
+}
+
+#endif
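
For illustration, here is a minimal sketch of how a driver refill path might sit on top of the helpers introduced by this patch. The ring and descriptor types (my_rx_ring, my_rx_desc) and the way the data_pool pointer reaches the driver are assumptions made purely for this example; only the data_pool_*() calls come from the patch above. The point of the wrapper is that the fast path does not branch on ZC vs. non-ZC itself: data_pool_alloc_page() and data_pool_put_page() pick the backing pool (io_uring ifq or page_pool) behind the same interface.

/*
 * Hypothetical driver refill/teardown path using the data_pool helpers.
 * my_rx_ring and my_rx_desc are stand-ins for a real driver's ring and
 * descriptor layout; how dp is obtained from the queue's
 * struct netdev_rx_queue is handled elsewhere in the series.
 */
#include <net/data_pool.h>

struct my_rx_desc {
	__le64 addr;
};

struct my_rx_ring {
	struct data_pool	*dp;	/* set up per hardware Rx queue */
	struct my_rx_desc	*descs;
	struct page		**pages;
	unsigned int		size;
};

static int my_rx_ring_refill(struct my_rx_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->size; i++) {
		struct page *page;

		/* ZC queues pull from the io_uring ifq, others from page_pool */
		page = data_pool_alloc_page(ring->dp);
		if (!page)
			return -ENOMEM;

		ring->pages[i] = page;
		/* DMA address comes from the same abstraction */
		ring->descs[i].addr =
			cpu_to_le64(data_pool_get_dma_addr(ring->dp, page));
	}

	return 0;
}

static void my_rx_ring_drain(struct my_rx_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->size; i++) {
		if (!ring->pages[i])
			continue;
		/* returns the buffer to whichever pool it came from */
		data_pool_put_page(ring->dp, ring->pages[i]);
		ring->pages[i] = NULL;
	}
}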