[RFC,v3,15/15] io_uring: Notify the application as the fillq is drained.

Message ID: 20221102233244.4022405-16-jonathan.lemon@gmail.com
State: New
Series: zero-copy RX for io_uring

Commit Message

Jonathan Lemon Nov. 2, 2022, 11:32 p.m. UTC
Userspace maintains a count of the free space available in the fillq,
and returns entries only when it believes space is available.  As the
kernel removes entries from the queue, it needs to notify the
application so that more buffers can be queued.

At most one notifier is outstanding per queue at any time, and it
carries the count of entries removed from the queue since the last
notification.

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
 io_uring/zctap.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
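
Purely for illustration (not part of the patch): a userspace consumer
of this notification might decode the CQE as in the sketch below.  It
assumes liburing, the 0xface0000 user_data placeholder used in this
patch, and a hypothetical refill_fillq() helper that posts fresh
buffers back to the fill queue.

#include <liburing.h>

/* Hypothetical application helper, not part of this series. */
extern void refill_fillq(unsigned bgid, unsigned count);

/* Placeholder user_data value used by this RFC. */
#define FILLQ_NOTIFY_UDATA 0xface0000ULL

static void handle_fillq_notify(struct io_uring *ring,
				struct io_uring_cqe *cqe)
{
	if (cqe->user_data == FILLQ_NOTIFY_UDATA &&
	    (cqe->flags & IORING_CQE_F_NOTIF)) {
		/* cqe->res encodes (fill_bgid << 16) | entries_taken */
		unsigned bgid  = (unsigned)cqe->res >> 16;
		unsigned taken = cqe->res & 0xffff;

		/* The kernel drained 'taken' entries; top the queue up. */
		refill_fillq(bgid, taken);
	}
	io_uring_cqe_seen(ring, cqe);
}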

Patch

diff --git a/io_uring/zctap.c b/io_uring/zctap.c
index 8515fff20ad5..5fb48f0c0351 100644
--- a/io_uring/zctap.c
+++ b/io_uring/zctap.c
@@ -15,6 +15,7 @@ 
 #include "zctap.h"
 #include "rsrc.h"
 #include "kbuf.h"
+#include "refs.h"
 
 #define NR_ZCTAP_IFQS	1
 
@@ -26,6 +27,7 @@  struct ifq_region {
 	int			cache_count;
 	int			free_count;
 	int			nr_pages;
+	int			taken;
 	u16			id;
 
 	spinlock_t		freelist_lock;
@@ -44,8 +46,14 @@  struct ifq_region {
 struct io_zctap_ifq_priv {
 	struct io_zctap_ifq	ifq;
 	struct ubuf_info	uarg;
+	struct io_kiocb		req;
 };
 
+static struct io_kiocb *io_zctap_ifq_notifier(struct io_zctap_ifq *ifq)
+{
+	return &((struct io_zctap_ifq_priv *)ifq)->req;
+}
+
 static void io_zctap_ifq_put(struct io_zctap_ifq *ifq);
 
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
@@ -131,6 +139,17 @@  static void io_zctap_recycle_buf(struct ifq_region *ifr,
 	}
 }
 
+static void io_zctap_fillq_notify(struct io_kiocb *req, bool *locked)
+{
+	int res = *(int *)&req->cmd.data;
+
+	io_post_aux_cqe(req->ctx, 0xface0000, res,
+			IORING_CQE_F_BUFFER|IORING_CQE_F_NOTIF,
+			true);
+
+	io_req_task_complete(req, locked);
+}
+
 /* gets a user-supplied buffer from the fill queue
  *   note: may drain N entries, but still have no usable buffers
  *   XXX add retry limit?
@@ -159,6 +178,7 @@  static struct io_zctap_buf *io_zctap_get_buffer(struct io_zctap_ifq *ifq,
 	if (!addr)
 		return NULL;
 
+	ifr->taken++;
 	pgid = addr & 0xffff;
 //	region_id = (addr >> 16) & 0xffff;
 
@@ -192,6 +212,7 @@  struct io_zctap_buf *io_zctap_get_buf(struct io_zctap_ifq *ifq, int refc)
 {
 	struct io_zctap_buf *buf;
 	struct ifq_region *ifr;
+	struct io_kiocb *req;
 	int count;
 	u16 pgid;
 
@@ -214,6 +235,19 @@  struct io_zctap_buf *io_zctap_get_buf(struct io_zctap_ifq *ifq, int refc)
 	count = io_zctap_get_buffers(ifq, ifr->cache, REGION_REFILL_COUNT);
 	ifr->cache_count += count;
 
+	req = io_zctap_ifq_notifier(ifq);
+	if (ifr->taken && atomic_read(&req->refs) == 1) {
+		int res;
+
+		res = (ifq->fill_bgid << 16) | ifr->taken;
+		*(int *)&req->cmd.data = res;
+		ifr->taken = 0;
+
+		req_ref_get(req);
+		req->io_task_work.func = io_zctap_fillq_notify;
+		io_req_task_work_add(req);
+	}
+
 	if (ifr->cache_count)
 		goto out;
 
@@ -441,6 +475,7 @@  int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id)
 	ifr->id = id;
 	ifr->ifq = ifq;
 	ifr->delay_end = 0;
+	ifr->taken = 0;
 	ifr->cache_count = 0;
 
 	err = io_zctap_map_region(ifr, imu);
@@ -529,6 +564,11 @@  static struct io_zctap_ifq *io_zctap_ifq_alloc(struct io_ring_ctx *ctx)
 	priv->ifq.ctx = ctx;
 	priv->ifq.queue_id = -1;
 	priv->ifq.uarg = &priv->uarg;
+
+	priv->req.ctx = ctx;
+	priv->req.task = current;
+	io_req_set_refcount(&priv->req);
+
 	return &priv->ifq;
 }
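
The notifier gating in io_zctap_get_buf() above hinges on the embedded
request's refcount: the request holds one baseline reference and a
queued notification holds a second, so refs == 1 means nothing is in
flight, and entries taken while a notification is pending simply
accumulate in ifr->taken.  A simplified userspace model of that
coalescing pattern (hypothetical names; C11 atomics stand in for the
kernel's request refcount):

#include <stdatomic.h>

struct notifier {
	atomic_int refs;	/* 1 == idle, 2 == notification in flight */
	int payload;		/* coalesced count carried by the notification */
};

/* Hypothetical delivery routine: reports n->payload, then drops the
 * extra reference so refs returns to 1.
 */
extern void queue_notification(struct notifier *n);

static int taken;		/* entries removed since the last notification */

static void on_buffer_taken(struct notifier *n)
{
	taken++;				/* mirrors ifr->taken++ */

	/* Fire only when no notification is already outstanding;
	 * otherwise the count keeps accumulating and is reported
	 * by the next notification.
	 */
	if (taken && atomic_load(&n->refs) == 1) {
		n->payload = taken;
		taken = 0;
		atomic_fetch_add(&n->refs, 1);	/* mirrors req_ref_get(req) */
		queue_notification(n);
	}
}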