@@ -26,6 +26,29 @@ bool fuse_uring_enabled(void)
return enable_uring;
}
+struct fuse_uring_cmd_pdu {
+ struct fuse_ring_ent *ring_ent;
+};
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops;
+
+static void fuse_uring_cmd_set_ring_ent(struct io_uring_cmd *cmd,
+ struct fuse_ring_ent *ring_ent)
+{
+ struct fuse_uring_cmd_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_cmd_pdu);
+
+ pdu->ring_ent = ring_ent;
+}
+
+static struct fuse_ring_ent *fuse_uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
+{
+ struct fuse_uring_cmd_pdu *pdu =
+ io_uring_cmd_to_pdu(cmd, struct fuse_uring_cmd_pdu);
+
+ return pdu->ring_ent;
+}
+
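Both helpers lean on the fixed-size pdu scratch area that io_uring embeds in each struct io_uring_cmd. As a rough sketch of the mechanism (the real io_uring_cmd_to_pdu() lives in include/linux/io_uring/cmd.h; example_cmd_to_pdu and this exact expansion are illustrative assumptions):

#include <linux/build_bug.h>
#include <linux/io_uring/cmd.h>

/* Sketch: reinterpret the per-command scratch area as the driver's pdu
 * type, failing the build if the pdu would not fit into cmd->pdu. */
#define example_cmd_to_pdu(cmd, pdu_type)				\
({									\
	BUILD_BUG_ON(sizeof(pdu_type) > sizeof((cmd)->pdu));		\
	(pdu_type *)&(cmd)->pdu;					\
})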
static void fuse_uring_req_end(struct fuse_ring_ent *ring_ent, bool set_err,
int error)
{
@@ -779,6 +802,31 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
return 0;
}
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ bool ready = true;
+
+ for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+ if (current_qid == qid)
+ continue;
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ ready = false;
+ break;
+ }
+
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->ent_avail_queue))
+ ready = false;
+ spin_unlock(&queue->lock);
+ }
+
+ return ready;
+}
+
/*
* fuse_uring_req_fetch command handling
*/
@@ -787,10 +835,22 @@ static void _fuse_uring_register(struct fuse_ring_ent *ring_ent,
unsigned int issue_flags)
{
struct fuse_ring_queue *queue = ring_ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
spin_lock(&queue->lock);
fuse_uring_ent_avail(ring_ent, queue);
spin_unlock(&queue->lock);
+
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(ring->ready, true);
+ fiq->ops = &fuse_io_uring_ops;
+ }
+ }
}
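The WRITE_ONCE() store is paired with lockless readers: ready only ever flips from false to true, and a reader that races and still sees false simply keeps using the classic /dev/fuse path. A minimal sketch of such a read side (the fuse_uring_ready() helper added to dev_uring_i.h below plays this role, with a plain load):

/* Sketch: annotated lockless read of the one-way ready flag. */
static bool example_ring_is_ready(struct fuse_ring *ring)
{
	return READ_ONCE(ring->ready);
}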
/*
@@ -974,3 +1034,118 @@ int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to them - so this has to be executed in the ring task context.
+ */
+static void
+fuse_uring_send_req_in_task(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_ent *ring_ent = fuse_uring_cmd_to_ring_ent(cmd);
+ struct fuse_ring_queue *queue = ring_ent->queue;
+ int err;
+
+ if (unlikely(issue_flags & IO_URING_F_TASK_DEAD)) {
+ err = -ECANCELED;
+ goto terminating;
+ }
+
+ err = fuse_uring_prepare_send(ring_ent);
+ if (err)
+ goto err;
+
+terminating:
+ spin_lock(&queue->lock);
+ ring_ent->state = FRRS_USERSPACE;
+ list_move(&ring_ent->list, &queue->ent_in_userspace);
+ spin_unlock(&queue->lock);
+ io_uring_cmd_done(cmd, err, 0, issue_flags);
+ return;
+err:
+ fuse_uring_next_fuse_req(ring_ent, queue, issue_flags);
+}
+
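The IO_URING_F_TASK_DEAD branch follows the general uring-cmd task-work contract: the callback still runs when the submitting task is exiting, but it must only complete the command and must not touch the dead task's address space. The bare pattern looks roughly like this (example_task_work_cb is a hypothetical callback, not part of this patch):

static void example_task_work_cb(struct io_uring_cmd *cmd,
				 unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_TASK_DEAD) {
		/* Submitter is gone: completing the cmd is all we may do. */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return;
	}

	/* Here it is safe to map/copy into the submitter's memory. */
	io_uring_cmd_done(cmd, 0, 0, issue_flags);
}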
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+ unsigned int qid;
+ struct fuse_ring_queue *queue;
+
+ qid = task_cpu(current);
+
+ if (WARN_ONCE(qid >= ring->nr_queues,
+ "Core number (%u) exceeds nr ueues (%zu)\n", qid,
+ ring->nr_queues))
+ qid = 0;
+
+ queue = ring->queues[qid];
+ if (WARN_ONCE(!queue, "Missing queue for qid %u\n", qid))
+ return NULL;
+
+ return queue;
+}
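Queue selection is plain CPU sharding: the submitter's current CPU indexes the queue array, clamped to queue 0 if the array is smaller. The same pattern in isolation (example_pick_shard is a hypothetical helper):

#include <linux/sched.h>

/* Sketch: shard by the caller's current CPU, clamped to the array size. */
static inline unsigned int example_pick_shard(unsigned int nr_shards)
{
	unsigned int cpu = task_cpu(current);

	return cpu < nr_shards ? cpu : 0;
}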
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ring_ent = NULL;
+ int err;
+
+ err = -EINVAL;
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ goto err;
+
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+
+ spin_lock(&queue->lock);
+ err = -ENOTCONN;
+ if (unlikely(queue->stopped))
+ goto err_unlock;
+
+ ring_ent = list_first_entry_or_null(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+ if (ring_ent)
+ fuse_uring_add_req_to_ring_ent(ring_ent, req);
+ else
+ list_add_tail(&req->list, &queue->fuse_req_queue);
+ spin_unlock(&queue->lock);
+
+ if (ring_ent) {
+ struct io_uring_cmd *cmd = ring_ent->cmd;
+
+ err = -EIO;
+ if (WARN_ON_ONCE(ring_ent->state != FRRS_FUSE_REQ))
+ goto err;
+
+ fuse_uring_cmd_set_ring_ent(cmd, ring_ent);
+ io_uring_cmd_complete_in_task(cmd, fuse_uring_send_req_in_task);
+ }
+
+ return;
+
+err_unlock:
+ spin_unlock(&queue->lock);
+err:
+ req->out.h.error = err;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+}
+
+static const struct fuse_iqueue_ops fuse_io_uring_ops = {
+ /* should be sent over io-uring as an enhancement */
+ .send_forget = fuse_dev_queue_forget,
+
+ /*
+ * could be sent over io-uring, but interrupts should be rare,
+ * so there is no need to make the code complex
+ */
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_uring_queue_fuse_req,
+};
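With fiq->ops swapped in _fuse_uring_register(), the generic queueing code in fs/fuse/dev.c dispatches through this table without any io-uring knowledge of its own; the shape of that dispatch is roughly (a sketch, not the exact dev.c code):

static void example_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	/* ...generic setup of req->in.h as done by the common path... */
	fiq->ops->send_req(fiq, req);	/* -> fuse_uring_queue_fuse_req() */
}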
@@ -122,6 +122,8 @@ struct fuse_ring {
unsigned long teardown_time;
atomic_t queue_refs;
+
+ bool ready;
};
bool fuse_uring_enabled(void);
@@ -129,6 +131,7 @@ void fuse_uring_destruct(struct fuse_conn *fc);
void fuse_uring_stop_queues(struct fuse_ring *ring);
void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
@@ -152,6 +155,11 @@ static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
atomic_read(&ring->queue_refs) == 0);
}
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return fc->ring && fc->ring->ready;
+}
+
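Nothing in this patch calls the helper yet; a later consumer would presumably gate path selection on it along these lines (hypothetical caller; note there is no !CONFIG_FUSE_IO_URING stub in this patch, so such a caller needs the #ifdef):

#ifdef CONFIG_FUSE_IO_URING
	if (fuse_uring_ready(fc)) {
		fuse_uring_queue_fuse_req(&fc->iq, req);
		return;
	}
#endif
	/* fall back to the classic /dev/fuse queueing path */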
#else /* CONFIG_FUSE_IO_URING */
struct fuse_ring;
This prepares queueing and sending foreground requests through io-uring.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
---
 fs/fuse/dev_uring.c   | 175 ++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/fuse/dev_uring_i.h |   8 +++
 2 files changed, 183 insertions(+)