[3/3] io_uring: use AT_STATX_CACHED for IORING_OP_STATX fast path

Message ID 20210125213614.24001-4-axboe@kernel.dk (mailing list archive)
State New, archived
Series [1/3] fs: add support for AT_STATX_CACHED

Commit Message

Jens Axboe Jan. 25, 2021, 9:36 p.m. UTC
Instead of always going async, we can now attempt a cached lookup by
using AT_STATX_CACHED. This turns into LOOKUP_CACHED, and ensures that
we'll only do a fast path dentry lookup for path resolution.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c246df2f95a4..99799cc5a42e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4394,20 +4394,27 @@  static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int io_statx(struct io_kiocb *req, bool force_nonblock)
 {
 	struct io_statx *ctx = &req->statx;
+	bool cached_set;
 	int ret;
 
-	if (force_nonblock) {
-		/* only need file table for an actual valid fd */
-		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
-			req->flags |= REQ_F_NO_FILE_TABLE;
-		return -EAGAIN;
-	}
+	cached_set = ctx->flags & AT_STATX_CACHED;
+	if (force_nonblock)
+		ctx->flags |= AT_STATX_CACHED;
 
 	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
 		       ctx->buffer);
 
-	if (ret < 0)
+	if (ret < 0) {
+		/* only punt to async if we set AT_STATX_CACHED ourselves */
+		if (ret == -EAGAIN && (!cached_set && force_nonblock)) {
+			/* only need file table for an actual valid fd */
+			if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+				req->flags |= REQ_F_NO_FILE_TABLE;
+			ctx->flags &= ~AT_STATX_CACHED;
+			return -EAGAIN;
+		}
 		req_set_fail_links(req);
+	}
 	io_req_complete(req, ret);
 	return 0;
 }
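
For readers who want to see the consumer side, here is a minimal userspace sketch (not part of the patch) that submits IORING_OP_STATX through liburing. The cached fast path is entirely kernel-internal, so userspace passes no new flags; the path "/etc/passwd", the queue depth, and the abbreviated error handling are illustrative assumptions only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct statx stx;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/*
	 * Queue a statx request. With this series applied, the kernel first
	 * attempts the lookup inline with AT_STATX_CACHED and only punts the
	 * request to async context if the cached dentry walk hits -EAGAIN.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_statx(sqe, AT_FDCWD, "/etc/passwd", 0,
			    STATX_BASIC_STATS, &stx);

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("statx res=%d size=%llu\n", cqe->res,
		       (unsigned long long)stx.stx_size);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}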