@@ -6,16 +6,73 @@
#include <linux/mm.h>
#include <linux/io_uring.h>
#include <linux/netdevice.h>
+#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "zctap.h"
+#include "rsrc.h"
+#include "kbuf.h"
#define NR_ZCTAP_IFQS 1
+struct ifq_region {
+ struct io_mapped_ubuf *imu;
+ int free_count;
+ int nr_pages;
+ u16 id;
+ struct page *freelist[];
+};
+
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);

+static void io_remove_ifq_region(struct ifq_region *ifr)
+{
+	kvfree(ifr);
+}
+
+int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id)
+{
+	struct io_ring_ctx *ctx = ifq->ctx;
+	struct io_mapped_ubuf *imu;
+	struct ifq_region *ifr;
+	int i, nr_pages;
+	struct page *page;
+
+	/* XXX for now, only allow one region per ifq. */
+	if (ifq->region)
+		return -EFAULT;
+
+	if (unlikely(id >= ctx->nr_user_bufs))
+		return -EFAULT;
+	id = array_index_nospec(id, ctx->nr_user_bufs);
+	imu = ctx->user_bufs[id];
+
+	/* XXX check region is page aligned */
+	if (imu->ubuf & ~PAGE_MASK || imu->ubuf_end & ~PAGE_MASK)
+		return -EFAULT;
+
+	nr_pages = imu->nr_bvecs;
+	ifr = kvmalloc(struct_size(ifr, freelist, nr_pages), GFP_KERNEL);
+	if (!ifr)
+		return -ENOMEM;
+
+	ifr->nr_pages = nr_pages;
+	ifr->imu = imu;
+	ifr->free_count = nr_pages;
+	ifr->id = id;
+
+	for (i = 0; i < nr_pages; i++) {
+		page = imu->bvec[i].bv_page;
+		ifr->freelist[i] = page;
+	}
+
+	ifq->region = ifr;
+
+	return 0;
+}
+
static int __io_queue_mgmt(struct net_device *dev, struct io_zctap_ifq *ifq,
			   u16 queue_id)
{
@@ -60,6 +117,8 @@ static void io_zctap_ifq_free(struct io_zctap_ifq *ifq)
{
	if (ifq->queue_id != -1)
		io_close_zctap_ifq(ifq, ifq->queue_id);
+	if (ifq->region)
+		io_remove_ifq_region(ifq->region);
	if (ifq->dev)
		dev_put(ifq->dev);
	kfree(ifq);
@@ -92,7 +151,9 @@ int io_register_ifq(struct io_ring_ctx *ctx,
	if (!ifq->dev)
		goto out;

-	/* region attachment TBD */
+	err = io_provide_ifq_region(ifq, req.region_id);
+	if (err)
+		goto out;

	err = io_open_zctap_ifq(ifq, req.queue_id);
	if (err)
@@ -6,4 +6,6 @@ int io_register_ifq(struct io_ring_ctx *ctx,
		    struct io_uring_ifq_req __user *arg);
void io_unregister_zctap_all(struct io_ring_ctx *ctx);

+int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id);
+
#endif
This takes all of a memory region that was previously registered with
io_uring and assigns it as the backing store for the specified ifq,
binding the pages to a specific device. The entire region is registered,
instead of providing individual buffers, as this allows the hardware to
select the optimal buffer size for incoming packets.

The region is registered as part of the register_ifq opcode, instead of
separately, since the ifq ring requires memory when it is created.

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
 io_uring/zctap.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++-
 io_uring/zctap.h |  2 ++
 2 files changed, 64 insertions(+), 1 deletion(-)
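For context, here is a rough userspace sketch (not part of this patch) of
how a backing region might be prepared before being handed to register_ifq.
The region must be page aligned at both ends, since io_provide_ifq_region()
rejects anything else, and the index returned by buffer registration is what
would later be passed as region_id in struct io_uring_ifq_req. The liburing
buffer-registration call is standard; the final ifq registration step is
only sketched in a comment because its opcode is not shown in this excerpt.

```c
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include <liburing.h>

/*
 * Illustrative only: allocate a page-aligned region and register it as
 * io_uring buffer index 0, which would then be passed as .region_id in
 * struct io_uring_ifq_req when registering the ifq.
 */
static int prepare_zctap_region(struct io_uring *ring, size_t len)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	struct iovec iov;
	void *buf;
	int ret;

	/* io_provide_ifq_region() requires both ends to be page aligned. */
	len = (len + pgsz - 1) & ~(pgsz - 1);
	if (posix_memalign(&buf, pgsz, len))
		return -ENOMEM;
	memset(buf, 0, len);

	iov.iov_base = buf;
	iov.iov_len = len;

	/* Buffer index 0 becomes the region_id looked up in ctx->user_bufs[]. */
	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret < 0) {
		free(buf);
		return ret;
	}

	/*
	 * Next step (opcode not shown in this excerpt): fill in a
	 * struct io_uring_ifq_req with .region_id = 0 and the desired
	 * .queue_id, then issue the register_ifq registration call.
	 */
	return 0;
}
```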