
[v2,5/7] fuse: use locked req consistently in fuse_uring_next_fuse_req()

Message ID 20250125-optimize-fuse-uring-req-timeouts-v2-5-7771a2300343@ddn.com
State New
Series fuse: {io-uring} Ensure fuse requests are set/read with locks

Commit Message

Bernd Schubert Jan. 25, 2025, 5:44 p.m. UTC
This changes fuse_uring_next_fuse_req() and its subfunctions
to use the req pointer obtained under the queue lock, to avoid
possible issues caused by compiler-induced reordering.

Also fix a function comment that was missed during a previous
code refactoring.

Fixes: a4bdb3d786c0 ("fuse: enable fuse-over-io-uring")
Signed-off-by: Bernd Schubert <bschubert@ddn.com>
---
 fs/fuse/dev_uring.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
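
For context, the hazard being fixed can be sketched like this (a
simplified before/after based on the hunks below, not verbatim
kernel code):

	/* Before: the request pointer was re-read from the shared ring
	 * entry after the queue lock had been dropped, so nothing
	 * ordered that load against concurrent writers of ent->fuse_req.
	 */
	spin_lock(&queue->lock);
	has_next = fuse_uring_ent_assign_req(ent);	/* sets ent->fuse_req */
	spin_unlock(&queue->lock);

	if (has_next)
		err = fuse_uring_send_next_to_ring(ent, ent->fuse_req,
						   issue_flags);

	/* After: the pointer obtained while holding the lock is passed
	 * through, eliminating the unlocked re-read. */
	spin_lock(&queue->lock);
	req = fuse_uring_ent_assign_req(ent);
	spin_unlock(&queue->lock);

	if (req)
		err = fuse_uring_send_next_to_ring(ent, req, issue_flags);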

Comments

Joanne Koong Jan. 27, 2025, 11:47 p.m. UTC | #1
On Sat, Jan 25, 2025 at 9:44 AM Bernd Schubert <bschubert@ddn.com> wrote:
>
> This changes fuse_uring_next_fuse_req() and its subfunctions
> to use the req pointer obtained under the queue lock, to avoid
> possible issues caused by compiler-induced reordering.
>
> Also fix a function comment that was missed during a previous
> code refactoring.
>
> Fixes: a4bdb3d786c0 ("fuse: enable fuse-over-io-uring")
> Signed-off-by: Bernd Schubert <bschubert@ddn.com>

Reviewed-by: Joanne Koong <joannelkoong@gmail.com>

> ---
>  fs/fuse/dev_uring.c | 32 ++++++++++++--------------------
>  1 file changed, 12 insertions(+), 20 deletions(-)
>
> diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
> index 80bb7396a8410022bbef1efa0522974bda77c81a..e90dd4ae5b2133e427855f1b0e60b73f008f7bc9 100644
> --- a/fs/fuse/dev_uring.c
> +++ b/fs/fuse/dev_uring.c
> @@ -75,16 +75,15 @@ static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
>         }
>  }
>
> -static void fuse_uring_req_end(struct fuse_ring_ent *ent, int error)
> +static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
> +                              int error)
>  {
>         struct fuse_ring_queue *queue = ent->queue;
> -       struct fuse_req *req;
>         struct fuse_ring *ring = queue->ring;
>         struct fuse_conn *fc = ring->fc;
>
>         lockdep_assert_not_held(&queue->lock);
>         spin_lock(&queue->lock);

Maybe also worth adding a WARN here to ensure that ent->fuse_req == req
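
A check along those lines might look like this (a rough sketch of
the idea, not a tested change):

	spin_lock(&queue->lock);
	/* Sketch: warn if the caller-supplied req is out of sync with
	 * the pointer stored in the ring entry.
	 */
	WARN_ON_ONCE(ent->fuse_req != req);
	ent->fuse_req = NULL;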

> -       req = ent->fuse_req;
>         ent->fuse_req = NULL;
>         if (test_bit(FR_BACKGROUND, &req->flags)) {
>                 queue->active_background--;
> [...]

Patch

diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 80bb7396a8410022bbef1efa0522974bda77c81a..e90dd4ae5b2133e427855f1b0e60b73f008f7bc9 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -75,16 +75,15 @@  static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
 	}
 }
 
-static void fuse_uring_req_end(struct fuse_ring_ent *ent, int error)
+static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
+			       int error)
 {
 	struct fuse_ring_queue *queue = ent->queue;
-	struct fuse_req *req;
 	struct fuse_ring *ring = queue->ring;
 	struct fuse_conn *fc = ring->fc;
 
 	lockdep_assert_not_held(&queue->lock);
 	spin_lock(&queue->lock);
-	req = ent->fuse_req;
 	ent->fuse_req = NULL;
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
 		queue->active_background--;
@@ -684,7 +683,7 @@  static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
 	if (!err)
 		set_bit(FR_SENT, &req->flags);
 	else
-		fuse_uring_req_end(ent, err);
+		fuse_uring_req_end(ent, req, err);
 
 	return err;
 }
@@ -768,12 +767,8 @@  static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
 	fuse_uring_add_to_pq(ent, req);
 }
 
-/*
- * Release the ring entry and fetch the next fuse request if available
- *
- * @return true if a new request has been fetched
- */
-static bool fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
+/* Fetch the next fuse request if available */
+static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
 	__must_hold(&queue->lock)
 {
 	struct fuse_req *req;
@@ -784,12 +779,10 @@  static bool fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
 
 	/* get and assign the next entry while it is still holding the lock */
 	req = list_first_entry_or_null(req_queue, struct fuse_req, list);
-	if (req) {
+	if (req)
 		fuse_uring_add_req_to_ring_ent(ent, req);
-		return true;
-	}
 
-	return false;
+	return req;
 }
 
 /*
@@ -819,7 +812,7 @@  static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
 
 	err = fuse_uring_copy_from_ring(ring, req, ent);
 out:
-	fuse_uring_req_end(ent, err);
+	fuse_uring_req_end(ent, req, err);
 }
 
 /*
@@ -830,17 +823,16 @@  static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
 				     unsigned int issue_flags)
 {
 	int err;
-	bool has_next;
+	struct fuse_req *req;
 
 retry:
 	spin_lock(&queue->lock);
 	fuse_uring_ent_avail(ent, queue);
-	has_next = fuse_uring_ent_assign_req(ent);
+	req = fuse_uring_ent_assign_req(ent);
 	spin_unlock(&queue->lock);
 
-	if (has_next) {
-		err = fuse_uring_send_next_to_ring(ent, ent->fuse_req,
-						   issue_flags);
+	if (req) {
+		err = fuse_uring_send_next_to_ring(ent, req, issue_flags);
 		if (err)
 			goto retry;
 	}
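
As a side note on the interface change: fuse_uring_ent_assign_req()
now returns the struct fuse_req * itself rather than a bool, a common
pattern for lock-protected lookups, since it hands the caller the
value read under the lock instead of forcing a second, unlocked read
of shared state. A generic sketch of the idiom (hypothetical names,
not code from this patch):

	/* Hypothetical illustration of returning the value read under
	 * the lock, so callers never touch shared state unlocked.
	 */
	static struct item *item_queue_pop(struct item_queue *q)
	{
		struct item *it;

		spin_lock(&q->lock);
		it = list_first_entry_or_null(&q->list, struct item, link);
		if (it)
			list_del_init(&it->link);
		spin_unlock(&q->lock);

		return it;	/* caller uses this pointer directly */
	}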