[2/3] io_uring/napi: delay napi timeout adjustment

Message ID 2766891d83b5d1ff231210ee3d09387398ef29cc.1722357468.git.asml.silence@gmail.com (mailing list archive)
State New
Series: Implement absolute value wait timeouts

Commit Message

Pavel Begunkov July 30, 2024, 8:29 p.m. UTC
Instead of adjusting the busy polling time in io_cqring_wait(), rely on the
deadline value and delay the check until io_napi_busy_loop_timeout() is
called inside the napi busy polling loop. A side effect is that a CPU-local
clock is compared with ktime_get(); however, wait timeouts are usually long
enough for that not to matter, and napi.c is already careless about mixing
time flavours, e.g. io_napi_blocking_busy_loop() reads a CPU-local time
before disabling preemption.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  1 -
 io_uring/napi.c     | 30 ++++++------------------------
 io_uring/napi.h     | 16 ----------------
 3 files changed, 6 insertions(+), 41 deletions(-)
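
For illustration, below is a minimal user-space C sketch (not kernel code) of
the check this patch moves into io_napi_busy_loop_timeout(): rather than
pre-shrinking the busy-poll budget in io_cqring_wait(), the busy-poll end
time is clamped to the overall wait deadline on every loop iteration. The
names busy_loop_timeout(), now_ns() and the chosen durations are illustrative
assumptions, not io_uring symbols; in the kernel, "now" comes from the
CPU-local busy_loop_current_time() while the deadline was derived from
ktime_get(), which is the clock mixing the commit message mentions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_ns;

static ktime_ns now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Return true once busy polling should stop. */
static bool busy_loop_timeout(ktime_ns start, ktime_ns busy_poll_dt,
			      ktime_ns deadline)
{
	if (busy_poll_dt) {
		ktime_ns end_time = start + busy_poll_dt;

		/* never busy poll past the overall wait deadline */
		if (end_time > deadline)
			end_time = deadline;
		return now_ns() > end_time;
	}
	return true;
}

int main(void)
{
	ktime_ns start = now_ns();
	ktime_ns busy_poll_dt = 200 * 1000;	/* 200us busy-poll budget */
	ktime_ns deadline = start + 50 * 1000;	/* 50us wait deadline */

	/* spin until the clamped budget expires */
	while (!busy_loop_timeout(start, busy_poll_dt, deadline))
		;

	printf("busy polled for ~%lld ns (clamped to the 50us deadline)\n",
	       (long long)(now_ns() - start));
	return 0;
}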

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3942db160f18..9ec07f76ad19 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2423,7 +2423,6 @@  static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 		dt = timespec64_to_ktime(ts);
 		iowq.timeout = ktime_add(dt, ktime_get());
-		io_napi_adjust_timeout(ctx, &iowq, dt);
 	}
 
 	if (sig) {
diff --git a/io_uring/napi.c b/io_uring/napi.c
index a670f49e30ef..c5c1177e2fb4 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -109,12 +109,15 @@  static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
 }
 
 static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
-					     ktime_t bp)
+					     struct io_wait_queue *iowq)
 {
+	ktime_t bp = iowq->napi_busy_poll_dt;
+
 	if (bp) {
 		ktime_t end_time = ktime_add(start_time, bp);
 		ktime_t now = net_to_ktime(busy_loop_current_time());
 
+		end_time = min(end_time, iowq->timeout);
 		return ktime_after(now, end_time);
 	}
 
@@ -130,8 +133,7 @@  static bool io_napi_busy_loop_should_end(void *data,
 		return true;
 	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
 		return true;
-	if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
-				      iowq->napi_busy_poll_dt))
+	if (io_napi_busy_loop_timeout(net_to_ktime(start_time), iowq))
 		return true;
 
 	return false;
@@ -271,27 +273,6 @@  int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 	return 0;
 }
 
-/*
- * __io_napi_adjust_timeout() - adjust busy loop timeout
- * @ctx: pointer to io-uring context structure
- * @iowq: pointer to io wait queue
- * @ts: pointer to timespec or NULL
- *
- * Adjust the busy loop timeout according to timespec and busy poll timeout.
- * If the specified NAPI timeout is bigger than the wait timeout, then adjust
- * the NAPI timeout accordingly.
- */
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-			      ktime_t to_wait)
-{
-	ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
-
-	if (to_wait)
-		poll_dt = min(poll_dt, to_wait);
-
-	iowq->napi_busy_poll_dt = poll_dt;
-}
-
 /*
  * __io_napi_busy_loop() - execute busy poll loop
  * @ctx: pointer to io-uring context structure
@@ -304,6 +285,7 @@  void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 	if ((ctx->flags & IORING_SETUP_SQPOLL) || !ctx->napi_enabled)
 		return;
 
+	iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 	io_napi_blocking_busy_loop(ctx, iowq);
 }
diff --git a/io_uring/napi.h b/io_uring/napi.h
index 88f1c21d5548..87e30b4f8d9e 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -17,8 +17,6 @@  int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 
 void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
 
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-		struct io_wait_queue *iowq, ktime_t to_wait);
 void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
@@ -27,15 +25,6 @@  static inline bool io_napi(struct io_ring_ctx *ctx)
 	return !list_empty(&ctx->napi_list);
 }
 
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-	if (!io_napi(ctx))
-		return;
-	__io_napi_adjust_timeout(ctx, iowq, to_wait);
-}
-
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
@@ -86,11 +75,6 @@  static inline bool io_napi(struct io_ring_ctx *ctx)
 static inline void io_napi_add(struct io_kiocb *req)
 {
 }
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-}
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {