
[PATCHv5 06/11] io_uring/rw: move fixed buffer import to issue path

Message ID 20250224213116.3509093-7-kbusch@meta.com
State New
Series ublk zero copy support

Commit Message

Keith Busch Feb. 24, 2025, 9:31 p.m. UTC
From: Keith Busch <kbusch@kernel.org>

Registered buffers may depend on a linked command, which makes the prep
path too early to import. Move the import to the issue path, when the node
is actually needed, like all the other users of fixed buffers.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 io_uring/opdef.c |  8 ++++----
 io_uring/rw.c    | 43 ++++++++++++++++++++++++++-----------------
 io_uring/rw.h    |  4 ++--
 3 files changed, 32 insertions(+), 23 deletions(-)
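
For context, a minimal userspace sketch of the linked-command case the
commit message describes. This is an illustration only: the command that
registers the buffer is represented here by a nop stand-in rather than the
ublk zero-copy register command this series adds.

#include <liburing.h>

/*
 * With IOSQE_IO_LINK, SQE A executes before SQE B, but both are prepped at
 * submit time. If A is the command that installs the fixed buffer, B's
 * buffer node does not exist yet when B is prepped, so the import has to
 * wait until B is issued, which is what this patch arranges.
 */
static void queue_linked_fixed_read(struct io_uring *ring, int fd,
				    void *dst, unsigned int len,
				    int buf_index)
{
	struct io_uring_sqe *a = io_uring_get_sqe(ring);
	struct io_uring_sqe *b = io_uring_get_sqe(ring);

	io_uring_prep_nop(a);		/* stand-in for the registering command */
	io_uring_sqe_set_flags(a, IOSQE_IO_LINK);	/* B runs only after A */

	io_uring_prep_read_fixed(b, fd, dst, len, 0, buf_index);

	io_uring_submit(ring);
}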

Comments

Ming Lei Feb. 25, 2025, 9:26 a.m. UTC | #1
On Mon, Feb 24, 2025 at 01:31:11PM -0800, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> Registered buffers may depend on a linked command, which makes the prep
> path too early to import. Move the import to the issue path, when the node
> is actually needed, like all the other users of fixed buffers.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>

Reviewed-by: Ming Lei <ming.lei@redhat.com>



Thanks,
Ming
Pavel Begunkov Feb. 25, 2025, 1:57 p.m. UTC | #2
On 2/24/25 21:31, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> Registered buffers may depend on a linked command, which makes the prep
> path too early to import. Move the import to the issue path, when the node
> is actually needed, like all the other users of fixed buffers.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
>   io_uring/opdef.c |  8 ++++----
>   io_uring/rw.c    | 43 ++++++++++++++++++++++++++-----------------
>   io_uring/rw.h    |  4 ++--
>   3 files changed, 32 insertions(+), 23 deletions(-)
> 
> diff --git a/io_uring/opdef.c b/io_uring/opdef.c
> index 9344534780a02..5369ae33b5ad9 100644
> --- a/io_uring/opdef.c
> +++ b/io_uring/opdef.c
> @@ -104,8 +104,8 @@ const struct io_issue_def io_issue_defs[] = {
>   		.iopoll			= 1,
>   		.iopoll_queue		= 1,
>   		.async_size		= sizeof(struct io_async_rw),
> -		.prep			= io_prep_read_fixed,
> -		.issue			= io_read,
> +		.prep			= io_prep_read,

io_prep_read_fixed() -> io_init_rw_fixed() -> io_prep_rw(do_import=false)
after:
io_prep_read() -> io_prep_rw(do_import=true)

This change flips do_import. I'd say let's just remove the importing
bits from io_prep_rw_fixed(), and that should be good for now.
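
For reference, a rough sketch of that alternative, reusing the do_import
parameter that the removed io_prep_rw_fixed() passed as false (a guess at
the suggestion, not code from this series):

static int io_prep_rw_fixed(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe, int ddir)
{
	/* keep the fixed-specific prep but drop only the importing bits:
	 * do_import=false defers the buffer lookup to the issue path */
	return io_prep_rw(req, sqe, ddir, false);
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}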

Apart from that:

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Caleb Sander Mateos Feb. 25, 2025, 8:57 p.m. UTC | #3
On Mon, Feb 24, 2025 at 1:31 PM Keith Busch <kbusch@meta.com> wrote:
>
> From: Keith Busch <kbusch@kernel.org>
>
> Registered buffers may depend on a linked command, which makes the prep
> path too early to import. Move the import to the issue path, when the node
> is actually needed, like all the other users of fixed buffers.
>
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
>  io_uring/opdef.c |  8 ++++----
>  io_uring/rw.c    | 43 ++++++++++++++++++++++++++-----------------
>  io_uring/rw.h    |  4 ++--
>  3 files changed, 32 insertions(+), 23 deletions(-)
>
> diff --git a/io_uring/opdef.c b/io_uring/opdef.c
> index 9344534780a02..5369ae33b5ad9 100644
> --- a/io_uring/opdef.c
> +++ b/io_uring/opdef.c
> @@ -104,8 +104,8 @@ const struct io_issue_def io_issue_defs[] = {
>                 .iopoll                 = 1,
>                 .iopoll_queue           = 1,
>                 .async_size             = sizeof(struct io_async_rw),
> -               .prep                   = io_prep_read_fixed,
> -               .issue                  = io_read,
> +               .prep                   = io_prep_read,
> +               .issue                  = io_read_fixed,
>         },
>         [IORING_OP_WRITE_FIXED] = {
>                 .needs_file             = 1,
> @@ -118,8 +118,8 @@ const struct io_issue_def io_issue_defs[] = {
>                 .iopoll                 = 1,
>                 .iopoll_queue           = 1,
>                 .async_size             = sizeof(struct io_async_rw),
> -               .prep                   = io_prep_write_fixed,
> -               .issue                  = io_write,
> +               .prep                   = io_prep_write,
> +               .issue                  = io_write_fixed,
>         },
>         [IORING_OP_POLL_ADD] = {
>                 .needs_file             = 1,
> diff --git a/io_uring/rw.c b/io_uring/rw.c
> index db24bcd4c6335..5f37fa48fdd9b 100644
> --- a/io_uring/rw.c
> +++ b/io_uring/rw.c
> @@ -348,33 +348,20 @@ int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>         return io_prep_rwv(req, sqe, ITER_SOURCE);
>  }
>
> -static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
> -                           int ddir)
> +static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags, int ddir)
>  {
>         struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
> -       struct io_async_rw *io;
> +       struct io_async_rw *io = req->async_data;
>         int ret;
>
> -       ret = io_prep_rw(req, sqe, ddir, false);
> -       if (unlikely(ret))
> -               return ret;
> +       if (io->bytes_done)
> +               return 0;
>
> -       io = req->async_data;
>         ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, 0);

Shouldn't this be passing issue_flags here?

Best,
Caleb



>         iov_iter_save_state(&io->iter, &io->iter_state);
>         return ret;
>  }
>
> -int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
> -{
> -       return io_prep_rw_fixed(req, sqe, ITER_DEST);
> -}
> -
> -int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
> -{
> -       return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
> -}
> -
>  /*
>   * Multishot read is prepared just like a normal read/write request, only
>   * difference is that we set the MULTISHOT flag.
> @@ -1138,6 +1125,28 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
>         }
>  }
>
> +int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
> +{
> +       int ret;
> +
> +       ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
> +       if (ret)
> +               return ret;
> +
> +       return io_read(req, issue_flags);
> +}
> +
> +int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
> +{
> +       int ret;
> +
> +       ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
> +       if (ret)
> +               return ret;
> +
> +       return io_write(req, issue_flags);
> +}
> +
>  void io_rw_fail(struct io_kiocb *req)
>  {
>         int res;
> diff --git a/io_uring/rw.h b/io_uring/rw.h
> index a45e0c71b59d6..42a491d277273 100644
> --- a/io_uring/rw.h
> +++ b/io_uring/rw.h
> @@ -30,14 +30,14 @@ struct io_async_rw {
>         );
>  };
>
> -int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
> -int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
>  int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
>  int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe);
>  int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe);
>  int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe);
>  int io_read(struct io_kiocb *req, unsigned int issue_flags);
>  int io_write(struct io_kiocb *req, unsigned int issue_flags);
> +int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags);
> +int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags);
>  void io_readv_writev_cleanup(struct io_kiocb *req);
>  void io_rw_fail(struct io_kiocb *req);
>  void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw);
> --
> 2.43.5
>
Keith Busch Feb. 25, 2025, 9:16 p.m. UTC | #4
On Tue, Feb 25, 2025 at 12:57:43PM -0800, Caleb Sander Mateos wrote:
> On Mon, Feb 24, 2025 at 1:31 PM Keith Busch <kbusch@meta.com> wrote:
> > +static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags, int ddir)
> >  {
> >         struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
> > -       struct io_async_rw *io;
> > +       struct io_async_rw *io = req->async_data;
> >         int ret;
> >
> > -       ret = io_prep_rw(req, sqe, ddir, false);
> > -       if (unlikely(ret))
> > -               return ret;
> > +       if (io->bytes_done)
> > +               return 0;
> >
> > -       io = req->async_data;
> >         ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, 0);
> 
> Shouldn't this be passing issue_flags here?

Definitely should be doing that, and I have that in my next version
already. Was hoping to get that fixed up version out before anyone
noticed, but you got me.
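
Presumably that just means threading issue_flags through to the import,
along these lines (a guess at the next version, not taken from it; only the
last argument of io_import_reg_buf() changes):

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	int ret;

	if (io->bytes_done)
		return 0;

	/* pass issue_flags through instead of hard-coding 0 */
	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
				issue_flags);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}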

Patch

diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 9344534780a02..5369ae33b5ad9 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -104,8 +104,8 @@  const struct io_issue_def io_issue_defs[] = {
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
-		.prep			= io_prep_read_fixed,
-		.issue			= io_read,
+		.prep			= io_prep_read,
+		.issue			= io_read_fixed,
 	},
 	[IORING_OP_WRITE_FIXED] = {
 		.needs_file		= 1,
@@ -118,8 +118,8 @@  const struct io_issue_def io_issue_defs[] = {
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
 		.async_size		= sizeof(struct io_async_rw),
-		.prep			= io_prep_write_fixed,
-		.issue			= io_write,
+		.prep			= io_prep_write,
+		.issue			= io_write_fixed,
 	},
 	[IORING_OP_POLL_ADD] = {
 		.needs_file		= 1,
diff --git a/io_uring/rw.c b/io_uring/rw.c
index db24bcd4c6335..5f37fa48fdd9b 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -348,33 +348,20 @@  int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return io_prep_rwv(req, sqe, ITER_SOURCE);
 }
 
-static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			    int ddir)
+static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags, int ddir)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-	struct io_async_rw *io;
+	struct io_async_rw *io = req->async_data;
 	int ret;
 
-	ret = io_prep_rw(req, sqe, ddir, false);
-	if (unlikely(ret))
-		return ret;
+	if (io->bytes_done)
+		return 0;
 
-	io = req->async_data;
 	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, 0);
 	iov_iter_save_state(&io->iter, &io->iter_state);
 	return ret;
 }
 
-int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-	return io_prep_rw_fixed(req, sqe, ITER_DEST);
-}
-
-int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
-}
-
 /*
  * Multishot read is prepared just like a normal read/write request, only
  * difference is that we set the MULTISHOT flag.
@@ -1138,6 +1125,28 @@  int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	}
 }
 
+int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+	int ret;
+
+	ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
+	if (ret)
+		return ret;
+
+	return io_read(req, issue_flags);
+}
+
+int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+	int ret;
+
+	ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
+	if (ret)
+		return ret;
+
+	return io_write(req, issue_flags);
+}
+
 void io_rw_fail(struct io_kiocb *req)
 {
 	int res;
diff --git a/io_uring/rw.h b/io_uring/rw.h
index a45e0c71b59d6..42a491d277273 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -30,14 +30,14 @@  struct io_async_rw {
 	);
 };
 
-int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read(struct io_kiocb *req, unsigned int issue_flags);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
+int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags);
+int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags);
 void io_readv_writev_cleanup(struct io_kiocb *req);
 void io_rw_fail(struct io_kiocb *req);
 void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw);