diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2423,7 +2423,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 		dt = timespec64_to_ktime(ts);
 		iowq.timeout = ktime_add(dt, ktime_get());
-		io_napi_adjust_timeout(ctx, &iowq, dt);
 	}
 
 	if (sig) {
diff --git a/io_uring/napi.c b/io_uring/napi.c
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -109,12 +109,15 @@ static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
 }
 
 static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
-					     ktime_t bp)
+					     struct io_wait_queue *iowq)
 {
+	ktime_t bp = iowq->napi_busy_poll_dt;
+
 	if (bp) {
 		ktime_t end_time = ktime_add(start_time, bp);
 		ktime_t now = net_to_ktime(busy_loop_current_time());
 
+		end_time = min(end_time, iowq->timeout);
 		return ktime_after(now, end_time);
 	}
 
@@ -130,8 +133,7 @@ static bool io_napi_busy_loop_should_end(void *data,
 		return true;
 	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
 		return true;
-	if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
-				      iowq->napi_busy_poll_dt))
+	if (io_napi_busy_loop_timeout(net_to_ktime(start_time), iowq))
 		return true;
 
 	return false;
@@ -271,27 +273,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 	return 0;
 }
 
-/*
- * __io_napi_adjust_timeout() - adjust busy loop timeout
- * @ctx: pointer to io-uring context structure
- * @iowq: pointer to io wait queue
- * @ts: pointer to timespec or NULL
- *
- * Adjust the busy loop timeout according to timespec and busy poll timeout.
- * If the specified NAPI timeout is bigger than the wait timeout, then adjust
- * the NAPI timeout accordingly.
- */
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-			      ktime_t to_wait)
-{
-	ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
-
-	if (to_wait)
-		poll_dt = min(poll_dt, to_wait);
-
-	iowq->napi_busy_poll_dt = poll_dt;
-}
-
 /*
  * __io_napi_busy_loop() - execute busy poll loop
  * @ctx: pointer to io-uring context structure
@@ -304,6 +285,7 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 	if ((ctx->flags & IORING_SETUP_SQPOLL) || !ctx->napi_enabled)
 		return;
 
+	iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 	io_napi_blocking_busy_loop(ctx, iowq);
 }
diff --git a/io_uring/napi.h b/io_uring/napi.h
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -17,8 +17,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 
 void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
 
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-			      struct io_wait_queue *iowq, ktime_t to_wait);
 void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
@@ -27,15 +25,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 	return !list_empty(&ctx->napi_list);
 }
 
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-	if (!io_napi(ctx))
-		return;
-	__io_napi_adjust_timeout(ctx, iowq, to_wait);
-}
-
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
@@ -86,11 +75,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 static inline void io_napi_add(struct io_kiocb *req)
 {
 }
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-}
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
Instead of adjusting the busy polling time in io_cqring_wait(), rely on
the deadline value and delay the check until io_napi_busy_loop_timeout()
is called inside the napi busy polling loop. There is a side effect of
comparing a CPU-local clock with ktime_get(); however, waiting timeouts
are usually long enough not to care, and napi.c is already careless
about mixing time flavours, e.g. io_napi_blocking_busy_loop() takes a
CPU-local time before disabling preemption.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  1 -
 io_uring/napi.c     | 30 ++++++------------------
 io_uring/napi.h     | 16 ----------------
 3 files changed, 6 insertions(+), 41 deletions(-)
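For readers tracing the new control flow, below is a minimal,
self-contained userspace sketch (not kernel code) of the clamping that
the reworked io_napi_busy_loop_timeout() performs. It assumes ktime_t
behaves as signed nanoseconds; the struct io_wait_queue here is a
hypothetical two-field stand-in for the real one. The point is that the
busy-poll window now ends at min(start + busy-poll budget, overall wait
deadline), which is what lets io_cqring_wait() drop the up-front
io_napi_adjust_timeout() call.

/*
 * Userspace model: the busy-poll window ends at
 * min(start_time + napi_busy_poll_dt, iowq->timeout).
 * ktime_t is modeled as s64 nanoseconds, as in the kernel.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;

/* Hypothetical stand-in for the kernel's struct io_wait_queue,
 * reduced to the two fields the timeout check reads. */
struct io_wait_queue {
	ktime_t timeout;		/* absolute wait deadline (ns) */
	ktime_t napi_busy_poll_dt;	/* relative busy-poll budget (ns) */
};

static bool busy_loop_timeout(ktime_t start_time, ktime_t now,
			      const struct io_wait_queue *iowq)
{
	ktime_t bp = iowq->napi_busy_poll_dt;

	if (bp) {
		ktime_t end_time = start_time + bp;

		/* Never poll past the overall wait deadline. */
		if (end_time > iowq->timeout)
			end_time = iowq->timeout;
		return now > end_time;
	}
	return true;
}

int main(void)
{
	/* 50us busy-poll budget, but the wait deadline is at 10us. */
	struct io_wait_queue iowq = {
		.timeout		= 10 * 1000,
		.napi_busy_poll_dt	= 50 * 1000,
	};

	printf("at  5us: %d\n", busy_loop_timeout(0,  5 * 1000, &iowq));
	printf("at 20us: %d\n", busy_loop_timeout(0, 20 * 1000, &iowq));
	return 0;
}

The second call reports a timeout even though the 50us budget is
unspent, because the 10us deadline has already passed; that is exactly
the check which previously had to be precomputed via
io_napi_adjust_timeout() and is now evaluated inside the polling loop.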