
io_uring: fix short read/write with linked ops

Message ID 20221003091923.2096150-1-stevensd@google.com (mailing list archive)
State New
Series io_uring: fix short read/write with linked ops

Commit Message

David Stevens Oct. 3, 2022, 9:19 a.m. UTC
From: David Stevens <stevensd@chromium.org>

When continuing a short read/write, account for any completed bytes when
calculating the operation's target length. The operation's actual
accumulated length is fixed up by kiocb_done, and the target and actual
lengths are then compared by __io_complete_rw_common. That function
already propagates the actual length to userspace, but the incorrect
target length caused it to always cancel linked operations, even when
the read/write eventually completed successfully.

Fixes: 227c0c9673d8 ("io_uring: internally retry short reads")
Signed-off-by: David Stevens <stevensd@chromium.org>
---
 io_uring/rw.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
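
For context, the linked-ops pattern this fix affects looks roughly like the
userspace sketch below (liburing; the file path, buffer size, and user_data
values are illustrative, and whether the kernel's internal short-read retry
actually triggers depends on page-cache and blocking state). Before the fix,
the NOP linked behind the read could complete with -ECANCELED even when the
read's own CQE reported the full requested length.

#include <liburing.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd, i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* illustrative path */
	if (fd < 0)
		return 1;

	/* read linked to a NOP: the NOP should only run if the read succeeds */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* expect res=4096 for user_data=1 and res=0 (not -ECANCELED) for 2 */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}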

Comments

Pavel Begunkov Oct. 3, 2022, 10:35 a.m. UTC | #1
On 10/3/22 10:19, David Stevens wrote:
> From: David Stevens <stevensd@chromium.org>
> 
> When continuing a short read/write, account for any completed bytes when
> calculating the operation's target length. The operation's actual
> accumulated length is fixed up by kiocb_done, and the target and actual
> lengths are then compared by __io_complete_rw_common. That function
> already propagates the actual length to userspace, but the incorrect
> target length caused it to always cancel linked operations, even when
> the read/write eventually completed successfully.

The issue looks the same as the one fixed by

https://git.kernel.dk/cgit/linux-block/commit/?h=for-6.1/io_uring&id=bf68b5b34311ee57ed40749a1257a30b46127556

Can you check if for-6.1 works for you?

git://git.kernel.dk/linux.git for-6.1/io_uring
https://git.kernel.dk/cgit/linux-block/log/?h=for-6.1/io_uring
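
For reference, a plain-git way to try that branch (commands illustrative,
URL as above):

  git fetch git://git.kernel.dk/linux.git for-6.1/io_uring
  git checkout FETCH_HEAD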


> Fixes: 227c0c9673d8 ("io_uring: internally retry short reads")
> Signed-off-by: David Stevens <stevensd@chromium.org>
> ---
>   io_uring/rw.c | 12 ++++++++----
>   1 file changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/io_uring/rw.c b/io_uring/rw.c
> index 76ebcfebc9a6..aa9967a52dfd 100644
> --- a/io_uring/rw.c
> +++ b/io_uring/rw.c
> @@ -706,13 +706,14 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
>   	struct kiocb *kiocb = &rw->kiocb;
>   	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
>   	struct io_async_rw *io;
> -	ssize_t ret, ret2;
> +	ssize_t ret, ret2, target_len;
>   	loff_t *ppos;
>   
>   	if (!req_has_async_data(req)) {
>   		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
>   		if (unlikely(ret < 0))
>   			return ret;
> +		target_len = iov_iter_count(&s->iter);
>   	} else {
>   		io = req->async_data;
>   		s = &io->s;
> @@ -733,6 +734,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
>   		 * need to make this conditional.
>   		 */
>   		iov_iter_restore(&s->iter, &s->iter_state);
> +		target_len = iov_iter_count(&s->iter) + io->bytes_done;
>   		iovec = NULL;
>   	}
>   	ret = io_rw_init_file(req, FMODE_READ);
> @@ -740,7 +742,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
>   		kfree(iovec);
>   		return ret;
>   	}
> -	req->cqe.res = iov_iter_count(&s->iter);
> +	req->cqe.res = target_len;
>   
>   	if (force_nonblock) {
>   		/* If the file doesn't support async, just async punt */
> @@ -850,18 +852,20 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
>   	struct iovec *iovec;
>   	struct kiocb *kiocb = &rw->kiocb;
>   	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
> -	ssize_t ret, ret2;
> +	ssize_t ret, ret2, target_len;
>   	loff_t *ppos;
>   
>   	if (!req_has_async_data(req)) {
>   		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
>   		if (unlikely(ret < 0))
>   			return ret;
> +		target_len = iov_iter_count(&s->iter);
>   	} else {
>   		struct io_async_rw *io = req->async_data;
>   
>   		s = &io->s;
>   		iov_iter_restore(&s->iter, &s->iter_state);
> +		target_len = iov_iter_count(&s->iter) + io->bytes_done;
>   		iovec = NULL;
>   	}
>   	ret = io_rw_init_file(req, FMODE_WRITE);
> @@ -869,7 +873,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
>   		kfree(iovec);
>   		return ret;
>   	}
> -	req->cqe.res = iov_iter_count(&s->iter);
> +	req->cqe.res = target_len;
>   
>   	if (force_nonblock) {
>   		/* If the file doesn't support async, just async punt */
David Stevens Oct. 4, 2022, 2:17 a.m. UTC | #2
On Mon, Oct 3, 2022 at 7:40 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
>
> On 10/3/22 10:19, David Stevens wrote:
> > From: David Stevens <stevensd@chromium.org>
> >
> > When continuing a short read/write, account for any completed bytes when
> > calculating the operation's target length. The operation's actual
> > accumulated length is fixed up by kiocb_done, and the target and actual
> > lengths are then compared by __io_complete_rw_common. That function
> > already propagates the actual length to userspace, but the incorrect
> > target length caused it to always cancel linked operations, even when
> > the read/write eventually completed successfully.
>
> The issue looks the same as the one fixed by
>
> https://git.kernel.dk/cgit/linux-block/commit/?h=for-6.1/io_uring&id=bf68b5b34311ee57ed40749a1257a30b46127556
>
> Can you check if for-6.1 works for you?
>
> git://git.kernel.dk/linux.git for-6.1/io_uring
> https://git.kernel.dk/cgit/linux-block/log/?h=for-6.1/io_uring

Yes, it looks like that fixes the bug I was running into.

-David

> > Fixes: 227c0c9673d8 ("io_uring: internally retry short reads")
> > Signed-off-by: David Stevens <stevensd@chromium.org>
> > ---
> >   io_uring/rw.c | 12 ++++++++----
> >   1 file changed, 8 insertions(+), 4 deletions(-)
> >
> > diff --git a/io_uring/rw.c b/io_uring/rw.c
> > index 76ebcfebc9a6..aa9967a52dfd 100644
> > --- a/io_uring/rw.c
> > +++ b/io_uring/rw.c
> > @@ -706,13 +706,14 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
> >       struct kiocb *kiocb = &rw->kiocb;
> >       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
> >       struct io_async_rw *io;
> > -     ssize_t ret, ret2;
> > +     ssize_t ret, ret2, target_len;
> >       loff_t *ppos;
> >
> >       if (!req_has_async_data(req)) {
> >               ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
> >               if (unlikely(ret < 0))
> >                       return ret;
> > +             target_len = iov_iter_count(&s->iter);
> >       } else {
> >               io = req->async_data;
> >               s = &io->s;
> > @@ -733,6 +734,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
> >                * need to make this conditional.
> >                */
> >               iov_iter_restore(&s->iter, &s->iter_state);
> > +             target_len = iov_iter_count(&s->iter) + io->bytes_done;
> >               iovec = NULL;
> >       }
> >       ret = io_rw_init_file(req, FMODE_READ);
> > @@ -740,7 +742,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
> >               kfree(iovec);
> >               return ret;
> >       }
> > -     req->cqe.res = iov_iter_count(&s->iter);
> > +     req->cqe.res = target_len;
> >
> >       if (force_nonblock) {
> >               /* If the file doesn't support async, just async punt */
> > @@ -850,18 +852,20 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
> >       struct iovec *iovec;
> >       struct kiocb *kiocb = &rw->kiocb;
> >       bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
> > -     ssize_t ret, ret2;
> > +     ssize_t ret, ret2, target_len;
> >       loff_t *ppos;
> >
> >       if (!req_has_async_data(req)) {
> >               ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
> >               if (unlikely(ret < 0))
> >                       return ret;
> > +             target_len = iov_iter_count(&s->iter);
> >       } else {
> >               struct io_async_rw *io = req->async_data;
> >
> >               s = &io->s;
> >               iov_iter_restore(&s->iter, &s->iter_state);
> > +             target_len = iov_iter_count(&s->iter) + io->bytes_done;
> >               iovec = NULL;
> >       }
> >       ret = io_rw_init_file(req, FMODE_WRITE);
> > @@ -869,7 +873,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
> >               kfree(iovec);
> >               return ret;
> >       }
> > -     req->cqe.res = iov_iter_count(&s->iter);
> > +     req->cqe.res = target_len;
> >
> >       if (force_nonblock) {
> >               /* If the file doesn't support async, just async punt */
>
> --
> Pavel Begunkov

Patch

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 76ebcfebc9a6..aa9967a52dfd 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -706,13 +706,14 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	struct kiocb *kiocb = &rw->kiocb;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	struct io_async_rw *io;
-	ssize_t ret, ret2;
+	ssize_t ret, ret2, target_len;
 	loff_t *ppos;
 
 	if (!req_has_async_data(req)) {
 		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
 		if (unlikely(ret < 0))
 			return ret;
+		target_len = iov_iter_count(&s->iter);
 	} else {
 		io = req->async_data;
 		s = &io->s;
@@ -733,6 +734,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 		 * need to make this conditional.
 		 */
 		iov_iter_restore(&s->iter, &s->iter_state);
+		target_len = iov_iter_count(&s->iter) + io->bytes_done;
 		iovec = NULL;
 	}
 	ret = io_rw_init_file(req, FMODE_READ);
@@ -740,7 +742,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 		kfree(iovec);
 		return ret;
 	}
-	req->cqe.res = iov_iter_count(&s->iter);
+	req->cqe.res = target_len;
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
@@ -850,18 +852,20 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	struct iovec *iovec;
 	struct kiocb *kiocb = &rw->kiocb;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-	ssize_t ret, ret2;
+	ssize_t ret, ret2, target_len;
 	loff_t *ppos;
 
 	if (!req_has_async_data(req)) {
 		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
 		if (unlikely(ret < 0))
 			return ret;
+		target_len = iov_iter_count(&s->iter);
 	} else {
 		struct io_async_rw *io = req->async_data;
 
 		s = &io->s;
 		iov_iter_restore(&s->iter, &s->iter_state);
+		target_len = iov_iter_count(&s->iter) + io->bytes_done;
 		iovec = NULL;
 	}
 	ret = io_rw_init_file(req, FMODE_WRITE);
@@ -869,7 +873,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		kfree(iovec);
 		return ret;
 	}
-	req->cqe.res = iov_iter_count(&s->iter);
+	req->cqe.res = target_len;
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
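
To make the target-length accounting concrete (numbers illustrative): suppose
a 4096-byte read returns 1024 bytes on its first nonblocking issue and is
punted to async context. On re-issue, the restored iterator only counts the
remaining 3072 bytes, so the old code set req->cqe.res = 3072. When the retry
then completes those bytes, kiocb_done adds io->bytes_done and reports 4096,
which no longer matches the 3072 target, so __io_complete_rw_common failed the
request and cancelled any linked operations despite full completion. With
target_len = iov_iter_count(&s->iter) + io->bytes_done, the target becomes
3072 + 1024 = 4096 and the comparison succeeds.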