
[v10,2/5] io-uring: add napi busy poll support

Message ID 20230425181845.2813854-3-shr@devkernel.io (mailing list archive)
State New
Series io_uring: add napi busy polling support

Commit Message

Stefan Roesch April 25, 2023, 6:18 p.m. UTC
This adds napi busy polling support to io_uring.c. It adds a new
napi_list to the io_ring_ctx structure. This list tracks the napi_id's
that are currently enabled for busy polling and is synchronized by the
new napi_lock spinlock. The current default napi busy poll timeout is
stored in napi_busy_poll_to; if napi busy polling is not enabled, the
value is 0.

In addition there is a hash table that stores the napi id and a pointer
to the corresponding list node. The hash table is used to speed up
lookups of the list elements.

NAPI_TIMEOUT limits how long a napi entry remains in the napi list;
entries that have not been refreshed within that time are removed as
stale.

The busy poll timeout is also stored as part of the io_wait_queue. This
is necessary because for sq polling the poll interval needs to be
adjusted, and the napi callback only allows passing in a single value.

This has been tested with two simple programs from the liburing library
repository: the napi client and the napi server program. The client
sends a request with a timestamp in its payload, and the server replies
with the same payload. The client computes the roundtrip time and stores
it to calculate the results.

The client runs on host1 and the server runs on host2 (in the same
rack). The measured times below are roundtrip times, averaged over 5
runs; each run measures 1 million roundtrips.

                   no rx coal          rx coal: frames=88,usecs=33
Default              57us                    56us

client_poll=100us    47us                    46us

server_poll=100us    51us                    46us

client_poll=100us+   40us                    40us
server_poll=100us

client_poll=100us+   41us                    39us
server_poll=100us+
prefer napi busy poll on client

client_poll=100us+   41us                    39us
server_poll=100us+
prefer napi busy poll on server

client_poll=100us+   41us                    39us
server_poll=100us+
prefer napi busy poll on client + server
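
For reference, a minimal sketch of how a test program like the napi
client/server might enable busy polling from user space. The field names
come from the io_uring_napi structure used by io_register_napi() in this
patch; the io_uring_register_napi() liburing helper and the timeout value
are assumptions for illustration, not part of this patch:

	struct io_uring_napi napi = {
		.busy_poll_to		= 100,	/* busy poll timeout, usecs */
		.prefer_busy_poll	= 1,	/* prefer busy poll over interrupts */
	};

	/* hypothetical liburing helper wrapping the register opcode */
	ret = io_uring_register_napi(&ring, &napi);
	if (ret)
		fprintf(stderr, "register napi failed: %d\n", ret);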

Signed-off-by: Stefan Roesch <shr@devkernel.io>
Suggested-by: Olivier Langlois <olivier@trillion01.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
---
 include/linux/io_uring_types.h |  10 ++
 io_uring/Makefile              |   1 +
 io_uring/io_uring.c            |  30 +++-
 io_uring/io_uring.h            |   4 +
 io_uring/napi.c                | 243 +++++++++++++++++++++++++++++++++
 io_uring/napi.h                |  66 +++++++++
 io_uring/poll.c                |   2 +
 7 files changed, 351 insertions(+), 5 deletions(-)
 create mode 100644 io_uring/napi.c
 create mode 100644 io_uring/napi.h

Comments

Jens Axboe April 27, 2023, 1:41 a.m. UTC | #1
On 4/25/23 12:18 PM, Stefan Roesch wrote:

Not too much to complain about, just some minor cleanups that would be
nice to do.

> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index 1b2a20a42413..2b2ca990ee93 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -277,6 +278,15 @@ struct io_ring_ctx {
>  	struct xarray		personalities;
>  	u32			pers_next;
>  
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +	struct list_head	napi_list;	/* track busy poll napi_id */
> +	spinlock_t		napi_lock;	/* napi_list lock */
> +
> +	DECLARE_HASHTABLE(napi_ht, 4);
> +	unsigned int		napi_busy_poll_to; /* napi busy poll default timeout */
> +	bool			napi_prefer_busy_poll;
> +#endif
> +

I don't mind overly long lines if it's warranted, but for a comment it
is not. The comment should just go above the variable.
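
Something like this, i.e. the comment on its own line above the member
(same field names as in the patch, just reflowed):

	DECLARE_HASHTABLE(napi_ht, 4);
	/* napi busy poll default timeout */
	unsigned int		napi_busy_poll_to;
	bool			napi_prefer_busy_poll;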

> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index efbd6c9c56e5..fff8f84eb560 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
>  	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
>  	iowq.timeout = KTIME_MAX;
>  
> -	if (uts) {
> -		struct timespec64 ts;
> +	if (!io_napi(ctx)) {
> +		if (uts) {
> +			struct timespec64 ts;
>  
> -		if (get_timespec64(&ts, uts))
> -			return -EFAULT;
> -		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> +			if (get_timespec64(&ts, uts))
> +				return -EFAULT;
> +			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> +		}
> +	} else {
> +		if (uts) {
> +			struct timespec64 ts;
> +
> +			if (get_timespec64(&ts, uts))
> +				return -EFAULT;
> +
> +			io_napi_adjust_timeout(ctx, &iowq, &ts);
> +			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> +		} else {
> +			io_napi_adjust_timeout(ctx, &iowq, NULL);
> +		}
> +		io_napi_busy_loop(ctx, &iowq);
>  	}

This is a little bit of a mess and has a lot of duplication, which is
not ideal. I'd do something like the end-of-email incremental to avoid
that.
Note that it's totally untested...

>  	trace_io_uring_cqring_wait(ctx, min_events);
> +
>  	do {
>  		unsigned long check_cq;
>  

Spurious line addition here.

> diff --git a/io_uring/napi.c b/io_uring/napi.c
> new file mode 100644
> index 000000000000..bb7d2b6b7e90
> --- /dev/null
> +++ b/io_uring/napi.c
> +static inline void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
> +		unsigned int *new_poll_to)
> +{
> +	struct timespec64 pollto = ns_to_timespec64(1000 * (s64)poll_to);

There's a bunch of these, but I'll just mention it here - io_uring
always just aligns a second line of arguments with the first one. We
should do that here too.

> +	if (timespec64_compare(ts, &pollto) > 0) {
> +		*ts = timespec64_sub(*ts, pollto);
> +		*new_poll_to = poll_to;
> +	} else {
> +		u64 to = timespec64_to_ns(ts);
> +
> +		do_div(to, 1000);

Is this going to complain on 32-bit?

> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
> +		struct io_wait_queue *iowq)
> +{
> +	unsigned long start_time = busy_loop_current_time();
> +
> +	do {
> +		if (list_is_singular(napi_list))
> +			break;
> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
> +			break;
> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
> +}

Do we need to check for an empty list here?

> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
> +		struct io_wait_queue *iowq)
> +{
> +	if (!list_is_singular(napi_list))
> +		io_napi_multi_busy_loop(napi_list, iowq);
> +
> +	if (list_is_singular(napi_list)) {
> +		struct io_napi_ht_entry *ne;
> +
> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
> +	}
> +}

Presumably io_napi_multi_busy_loop() can change the state of the list,
which is why we have if (cond) and then if (!cond) here? Would probably
warrant a comment as it looks a bit confusing.

> +/*
> + * io_napi_adjust_timeout() - Add napi id to the busy poll list
> + * @ctx: pointer to io-uring context structure
> + * @iowq: pointer to io wait queue
> + * @ts: pointer to timespec or NULL
> + *
> + * Adjust the busy loop timeout according to timespec and busy poll timeout.
> + */
> +void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
> +		struct timespec64 *ts)
> +{
> +	if (ts)
> +		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
> +			&iowq->napi_busy_poll_to);
> +	else
> +		iowq->napi_busy_poll_to = READ_ONCE(ctx->napi_busy_poll_to);
> +}

We should probably just pass 'ctx' to adjust_timeout()? Or do

	unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);

at the top and then use that for both. Would get rid of that overly long
line too.


diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a4c9a404f631..390f54c546d6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2617,29 +2617,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 	iowq.timeout = KTIME_MAX;
 
-	if (!io_napi(ctx)) {
-		if (uts) {
-			struct timespec64 ts;
+	if (uts) {
+		struct timespec64 ts;
 
-			if (get_timespec64(&ts, uts))
-				return -EFAULT;
-			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		}
-	} else {
-		if (uts) {
-			struct timespec64 ts;
-
-			if (get_timespec64(&ts, uts))
-				return -EFAULT;
-
-			io_napi_adjust_timeout(ctx, &iowq, &ts);
-			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		} else {
-			io_napi_adjust_timeout(ctx, &iowq, NULL);
-		}
-		io_napi_busy_loop(ctx, &iowq);
+		if (get_timespec64(&ts, uts))
+			return -EFAULT;
+		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+		io_napi_adjust_timeout(ctx, &iowq, &ts);
 	}
 
+	io_napi_busy_loop(ctx, &iowq);
+
 	trace_io_uring_cqring_wait(ctx, min_events);
 
 	do {
diff --git a/io_uring/napi.c b/io_uring/napi.c
index ca12ff5f5611..3a0d0317ceec 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -100,7 +100,8 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
 	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
 }
 
-static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_poll)
+static bool __io_napi_do_busy_loop(struct list_head *napi_list,
+				   bool prefer_busy_poll)
 {
 	struct io_napi_ht_entry *e;
 	struct io_napi_ht_entry *n;
@@ -121,7 +122,7 @@ static void io_napi_multi_busy_loop(struct list_head *napi_list,
 	do {
 		if (list_is_singular(napi_list))
 			break;
-		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
+		if (!__io_napi_do_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
 			break;
 	} while (!io_napi_busy_loop_should_end(iowq, start_time));
 }
@@ -251,16 +252,18 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 }
 
 /*
- * io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - Add napi id to the busy poll list
  * @ctx: pointer to io-uring context structure
  * @iowq: pointer to io wait queue
  * @ts: pointer to timespec or NULL
  *
  * Adjust the busy loop timeout according to timespec and busy poll timeout.
  */
-void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-		struct timespec64 *ts)
+void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+			      struct io_wait_queue *iowq, struct timespec64 *ts)
 {
+	if (!io_napi(ctx))
+		return;
 	if (ts)
 		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
 			&iowq->napi_busy_poll_to);
@@ -269,13 +272,13 @@ void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
 }
 
 /*
- * io_napi_busy_loop() - execute busy poll loop
+ * __io_napi_busy_loop() - execute busy poll loop
  * @ctx: pointer to io-uring context structure
  * @iowq: pointer to io wait queue
  *
  * Execute the busy poll loop and merge the spliced off list.
  */
-void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 {
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 
@@ -302,8 +305,8 @@ void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
  */
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 {
-	int ret = 0;
 	LIST_HEAD(napi_list);
+	int ret;
 
 	if (!READ_ONCE(ctx->napi_busy_poll_to))
 		return 0;
@@ -312,9 +315,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 	list_splice_init(&ctx->napi_list, &napi_list);
 	spin_unlock(&ctx->napi_lock);
 
-	if (__io_napi_busy_loop(&napi_list, ctx->napi_prefer_busy_poll))
-		ret = 1;
-
+	ret = __io_napi_do_busy_loop(&napi_list, ctx->napi_prefer_busy_poll);
 	io_napi_merge_lists(ctx, &napi_list);
 	return ret;
 }
diff --git a/io_uring/napi.h b/io_uring/napi.h
index 8da8f032a441..b5e93b3777c0 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -17,9 +17,9 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 
 void __io_napi_add(struct io_ring_ctx *ctx, struct file *file);
 
-void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 		struct io_wait_queue *iowq, struct timespec64 *ts);
-void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
 static inline bool io_napi(struct io_ring_ctx *ctx)
@@ -27,6 +27,23 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 	return !list_empty(&ctx->napi_list);
 }
 
+static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+					  struct io_wait_queue *iowq,
+					  struct timespec64 *ts)
+{
+	if (!io_napi(ctx))
+		return;
+	__io_napi_adjust_timeout(ctx, iowq, ts);
+}
+
+static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
+				     struct io_wait_queue *iowq)
+{
+	if (!io_napi(ctx))
+		return;
+	__io_napi_busy_loop(ctx, iowq);
+}
+
 /*
  * io_napi_add() - Add napi id to the busy poll list
  * @req: pointer to io_kiocb request
Jens Axboe April 27, 2023, 1:46 a.m. UTC | #2
On 4/26/23 7:41 PM, Jens Axboe wrote:

I'd probably also do this:


diff --git a/io_uring/napi.c b/io_uring/napi.c
index ca12ff5f5611..35a29fd9afbc 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -95,12 +95,17 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
 {
 	struct io_wait_queue *iowq = p;
 
-	return signal_pending(current) ||
-	       io_should_wake(iowq) ||
-	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
+	if (signal_pending(current))
+		return true;
+	if (io_should_wake(iowq))
+		return true;
+	if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
+		return true;
+	return false;
 }
 
as that is easier to read.
Jens Axboe April 27, 2023, 1:50 a.m. UTC | #3
On 4/26/23 7:41 PM, Jens Axboe wrote:
>> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
>> +		struct io_wait_queue *iowq)
>> +{
>> +	unsigned long start_time = busy_loop_current_time();
>> +
>> +	do {
>> +		if (list_is_singular(napi_list))
>> +			break;
>> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>> +			break;
>> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>> +}
> 
> Do we need to check for an empty list here?
> 
>> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
>> +		struct io_wait_queue *iowq)
>> +{
>> +	if (!list_is_singular(napi_list))
>> +		io_napi_multi_busy_loop(napi_list, iowq);
>> +
>> +	if (list_is_singular(napi_list)) {
>> +		struct io_napi_ht_entry *ne;
>> +
>> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
>> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>> +	}
>> +}
> 
> Presumably io_napi_multi_busy_loop() can change the state of the list,
> which is why we have if (cond) and then if (!cond) here? Would probably
> warrant a comment as it looks a bit confusing.

Doesn't look like that's the case? We just call into
io_napi_multi_busy_loop() -> napi_busy_loop() which doesn't touch it. So
the state should be the same?

We also check if the list isn't singular before we call it, and then
io_napi_multi_busy_loop() breaks out of the loop if it is. And we know
it's not singular when calling, and I don't see what changes it.

Unless I'm missing something, which is quite possible, this looks overly
convoluted and has extra pointless checks?
Jens Axboe April 27, 2023, 1:59 a.m. UTC | #4
On 4/26/23 7:50 PM, Jens Axboe wrote:
> On 4/26/23 7:41 PM, Jens Axboe wrote:
>>> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
>>> +		struct io_wait_queue *iowq)
>>> +{
>>> +	unsigned long start_time = busy_loop_current_time();
>>> +
>>> +	do {
>>> +		if (list_is_singular(napi_list))
>>> +			break;
>>> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>>> +			break;
>>> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>>> +}
>>
>> Do we need to check for an empty list here?
>>
>>> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
>>> +		struct io_wait_queue *iowq)
>>> +{
>>> +	if (!list_is_singular(napi_list))
>>> +		io_napi_multi_busy_loop(napi_list, iowq);
>>> +
>>> +	if (list_is_singular(napi_list)) {
>>> +		struct io_napi_ht_entry *ne;
>>> +
>>> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>>> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
>>> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>>> +	}
>>> +}
>>
>> Presumably io_napi_multi_busy_loop() can change the state of the list,
>> which is why we have if (cond) and then if (!cond) here? Would probably
>> warrant a comment as it looks a bit confusing.
> 
> Doesn't look like that's the case? We just call into
> io_napi_multi_busy_loop() -> napi_busy_loop() which doesn't touch it. So
> the state should be the same?
> 
> We also check if the list isn't singular before we call it, and then
> io_napi_multi_busy_loop() breaks out of the loop if it is. And we know
> it's not singular when calling, and I don't see what changes it.
> 
> Unless I'm missing something, which is quite possible, this looks overly
> convoluted and has extra pointless checks?

All the cleanups/fixes I ended up doing are below. Not all for this
patch probably, just for the series overall. Not tested at all, so
please just go over them and see what makes sense and let me know which
hunks you don't agree with.


diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a4c9a404f631..390f54c546d6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2617,29 +2617,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 	iowq.timeout = KTIME_MAX;
 
-	if (!io_napi(ctx)) {
-		if (uts) {
-			struct timespec64 ts;
+	if (uts) {
+		struct timespec64 ts;
 
-			if (get_timespec64(&ts, uts))
-				return -EFAULT;
-			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		}
-	} else {
-		if (uts) {
-			struct timespec64 ts;
-
-			if (get_timespec64(&ts, uts))
-				return -EFAULT;
-
-			io_napi_adjust_timeout(ctx, &iowq, &ts);
-			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		} else {
-			io_napi_adjust_timeout(ctx, &iowq, NULL);
-		}
-		io_napi_busy_loop(ctx, &iowq);
+		if (get_timespec64(&ts, uts))
+			return -EFAULT;
+		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+		io_napi_adjust_timeout(ctx, &iowq, &ts);
 	}
 
+	io_napi_busy_loop(ctx, &iowq);
+
 	trace_io_uring_cqring_wait(ctx, min_events);
 
 	do {
diff --git a/io_uring/napi.c b/io_uring/napi.c
index ca12ff5f5611..50b2bdb10417 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -60,8 +60,8 @@ void __io_napi_add(struct io_ring_ctx *ctx, struct file *file)
 	spin_unlock(&ctx->napi_lock);
 }
 
-static inline void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
-		unsigned int *new_poll_to)
+static void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
+			  unsigned int *new_poll_to)
 {
 	struct timespec64 pollto = ns_to_timespec64(1000 * (s64)poll_to);
 
@@ -95,12 +95,17 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
 {
 	struct io_wait_queue *iowq = p;
 
-	return signal_pending(current) ||
-	       io_should_wake(iowq) ||
-	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
+	if (signal_pending(current))
+		return true;
+	if (io_should_wake(iowq))
+		return true;
+	if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
+		return true;
+	return false;
 }
 
-static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_poll)
+static bool __io_napi_do_busy_loop(struct list_head *napi_list,
+				   bool prefer_busy_poll)
 {
 	struct io_napi_ht_entry *e;
 	struct io_napi_ht_entry *n;
@@ -113,38 +118,35 @@ static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_po
 	return !list_empty(napi_list);
 }
 
-static void io_napi_multi_busy_loop(struct list_head *napi_list,
-		struct io_wait_queue *iowq)
+static void io_napi_multi_busy_loop(struct list_head *list,
+				   struct io_wait_queue *iowq)
 {
 	unsigned long start_time = busy_loop_current_time();
 
 	do {
-		if (list_is_singular(napi_list))
-			break;
-		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
+		if (!__io_napi_do_busy_loop(list, iowq->napi_prefer_busy_poll))
 			break;
 	} while (!io_napi_busy_loop_should_end(iowq, start_time));
 }
 
 static void io_napi_blocking_busy_loop(struct list_head *napi_list,
-		struct io_wait_queue *iowq)
+				       struct io_wait_queue *iowq)
 {
-	if (!list_is_singular(napi_list))
+	if (!list_is_singular(napi_list)) {
 		io_napi_multi_busy_loop(napi_list, iowq);
-
-	if (list_is_singular(napi_list)) {
+	} else {
 		struct io_napi_ht_entry *ne;
 
 		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
 		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
-			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
+				iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
 	}
 }
 
 static void io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
-	unsigned int i;
 	struct io_napi_ht_entry *he;
+	unsigned int i;
 
 	hash_for_each(ctx->napi_ht, i, he, node) {
 		if (time_after(jiffies, he->timeout)) {
@@ -152,11 +154,10 @@ static void io_napi_remove_stale(struct io_ring_ctx *ctx)
 			hash_del(&he->node);
 		}
 	}
-
 }
 
 static void io_napi_merge_lists(struct io_ring_ctx *ctx,
-		struct list_head *napi_list)
+				struct list_head *napi_list)
 {
 	spin_lock(&ctx->napi_lock);
 	list_splice(napi_list, &ctx->napi_list);
@@ -186,9 +187,9 @@ void io_napi_init(struct io_ring_ctx *ctx)
  */
 void io_napi_free(struct io_ring_ctx *ctx)
 {
-	unsigned int i;
 	struct io_napi_ht_entry *he;
 	LIST_HEAD(napi_list);
+	unsigned int i;
 
 	spin_lock(&ctx->napi_lock);
 	hash_for_each(ctx->napi_ht, i, he, node)
@@ -206,8 +207,8 @@ void io_napi_free(struct io_ring_ctx *ctx)
 int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
 {
 	const struct io_uring_napi curr = {
-		.busy_poll_to = ctx->napi_busy_poll_to,
-		.prefer_busy_poll = ctx->napi_prefer_busy_poll
+		.busy_poll_to		= ctx->napi_busy_poll_to,
+		.prefer_busy_poll	= ctx->napi_prefer_busy_poll
 	};
 	struct io_uring_napi napi;
 
@@ -236,14 +237,12 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
 int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 {
 	const struct io_uring_napi curr = {
-		.busy_poll_to = ctx->napi_busy_poll_to,
-		.prefer_busy_poll = ctx->napi_prefer_busy_poll
+		.busy_poll_to		= ctx->napi_busy_poll_to,
+		.prefer_busy_poll	= ctx->napi_prefer_busy_poll
 	};
 
-	if (arg) {
-		if (copy_to_user(arg, &curr, sizeof(curr)))
-			return -EFAULT;
-	}
+	if (arg && copy_to_user(arg, &curr, sizeof(curr)))
+		return -EFAULT;
 
 	WRITE_ONCE(ctx->napi_busy_poll_to, 0);
 	WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
@@ -251,31 +250,36 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 }
 
 /*
- * io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - Add napi id to the busy poll list
  * @ctx: pointer to io-uring context structure
  * @iowq: pointer to io wait queue
  * @ts: pointer to timespec or NULL
  *
  * Adjust the busy loop timeout according to timespec and busy poll timeout.
  */
-void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-		struct timespec64 *ts)
+void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+			      struct io_wait_queue *iowq, struct timespec64 *ts)
 {
+	unsigned int poll_to;
+
+	if (!io_napi(ctx))
+		return;
+
+	poll_to = READ_ONCE(ctx->napi_busy_poll_to);
 	if (ts)
-		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
-			&iowq->napi_busy_poll_to);
+		adjust_timeout(poll_to, ts, &iowq->napi_busy_poll_to);
 	else
-		iowq->napi_busy_poll_to = READ_ONCE(ctx->napi_busy_poll_to);
+		iowq->napi_busy_poll_to = poll_to;
 }
 
 /*
- * io_napi_busy_loop() - execute busy poll loop
+ * __io_napi_busy_loop() - execute busy poll loop
  * @ctx: pointer to io-uring context structure
  * @iowq: pointer to io wait queue
  *
  * Execute the busy poll loop and merge the spliced off list.
  */
-void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 {
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 
@@ -302,8 +306,8 @@ void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
  */
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 {
-	int ret = 0;
 	LIST_HEAD(napi_list);
+	int ret;
 
 	if (!READ_ONCE(ctx->napi_busy_poll_to))
 		return 0;
@@ -312,9 +316,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 	list_splice_init(&ctx->napi_list, &napi_list);
 	spin_unlock(&ctx->napi_lock);
 
-	if (__io_napi_busy_loop(&napi_list, ctx->napi_prefer_busy_poll))
-		ret = 1;
-
+	ret = __io_napi_do_busy_loop(&napi_list, ctx->napi_prefer_busy_poll);
 	io_napi_merge_lists(ctx, &napi_list);
 	return ret;
 }
diff --git a/io_uring/napi.h b/io_uring/napi.h
index 8da8f032a441..b5e93b3777c0 100644
--- a/io_uring/napi.h
+++ b/io_uring/napi.h
@@ -17,9 +17,9 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 
 void __io_napi_add(struct io_ring_ctx *ctx, struct file *file);
 
-void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 		struct io_wait_queue *iowq, struct timespec64 *ts);
-void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
 static inline bool io_napi(struct io_ring_ctx *ctx)
@@ -27,6 +27,23 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 	return !list_empty(&ctx->napi_list);
 }
 
+static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+					  struct io_wait_queue *iowq,
+					  struct timespec64 *ts)
+{
+	if (!io_napi(ctx))
+		return;
+	__io_napi_adjust_timeout(ctx, iowq, ts);
+}
+
+static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
+				     struct io_wait_queue *iowq)
+{
+	if (!io_napi(ctx))
+		return;
+	__io_napi_busy_loop(ctx, iowq);
+}
+
 /*
  * io_napi_add() - Add napi id to the busy poll list
  * @req: pointer to io_kiocb request
Ammar Faizi April 27, 2023, 2:56 a.m. UTC | #5
On Tue, Apr 25, 2023 at 11:18:42AM -0700, Stefan Roesch wrote:
> +void __io_napi_add(struct io_ring_ctx *ctx, struct file *file)
> +{
> +	unsigned int napi_id;
> +	struct socket *sock;
> +	struct sock *sk;
> +	struct io_napi_ht_entry *he;
> +
> +	sock = sock_from_file(file);
> +	if (!sock)
> +		return;
> +
> +	sk = sock->sk;
> +	if (!sk)
> +		return;
> +
> +	napi_id = READ_ONCE(sk->sk_napi_id);
> +
> +	/* Non-NAPI IDs can be rejected. */
> +	if (napi_id < MIN_NAPI_ID)
> +		return;
> +
> +	spin_lock(&ctx->napi_lock);
> +	hash_for_each_possible(ctx->napi_ht, he, node, napi_id) {
> +		if (he->napi_id == napi_id) {
> +			he->timeout = jiffies + NAPI_TIMEOUT;
> +			goto out;
> +		}
> +	}
> +
> +	he = kmalloc(sizeof(*he), GFP_NOWAIT);
> +	if (!he)
> +		goto out;
> +
> +	he->napi_id = napi_id;
> +	he->timeout = jiffies + NAPI_TIMEOUT;
> +	hash_add(ctx->napi_ht, &he->node, napi_id);
> +
> +	list_add_tail(&he->list, &ctx->napi_list);
> +
> +out:
> +	spin_unlock(&ctx->napi_lock);
> +}

What about using GFP_KERNEL to allocate 'he' outside the spin lock, then
kfree() it in the (he->napi_id == napi_id) path after unlock?

That would make the critical section shorter. Also, GFP_NOWAIT is likely
to fail under memory pressure.
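
Roughly something like this (untested, reusing the names from
__io_napi_add() above; 'e' is only introduced here as a second iterator
for illustration):

	struct io_napi_ht_entry *he, *e;

	/* speculative allocation, done before taking the lock */
	he = kmalloc(sizeof(*he), GFP_KERNEL);
	if (!he)
		return;

	spin_lock(&ctx->napi_lock);
	hash_for_each_possible(ctx->napi_ht, e, node, napi_id) {
		if (e->napi_id == napi_id) {
			e->timeout = jiffies + NAPI_TIMEOUT;
			spin_unlock(&ctx->napi_lock);
			kfree(he);	/* already tracked, drop the spare */
			return;
		}
	}

	he->napi_id = napi_id;
	he->timeout = jiffies + NAPI_TIMEOUT;
	hash_add(ctx->napi_ht, &he->node, napi_id);
	list_add_tail(&he->list, &ctx->napi_list);
	spin_unlock(&ctx->napi_lock);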
Jens Axboe April 27, 2023, 11:16 a.m. UTC | #6
On 4/26/23 8:56 PM, Ammar Faizi wrote:
> On Tue, Apr 25, 2023 at 11:18:42AM -0700, Stefan Roesch wrote:
>> +void __io_napi_add(struct io_ring_ctx *ctx, struct file *file)
>> +{
>> +	unsigned int napi_id;
>> +	struct socket *sock;
>> +	struct sock *sk;
>> +	struct io_napi_ht_entry *he;
>> +
>> +	sock = sock_from_file(file);
>> +	if (!sock)
>> +		return;
>> +
>> +	sk = sock->sk;
>> +	if (!sk)
>> +		return;
>> +
>> +	napi_id = READ_ONCE(sk->sk_napi_id);
>> +
>> +	/* Non-NAPI IDs can be rejected. */
>> +	if (napi_id < MIN_NAPI_ID)
>> +		return;
>> +
>> +	spin_lock(&ctx->napi_lock);
>> +	hash_for_each_possible(ctx->napi_ht, he, node, napi_id) {
>> +		if (he->napi_id == napi_id) {
>> +			he->timeout = jiffies + NAPI_TIMEOUT;
>> +			goto out;
>> +		}
>> +	}
>> +
>> +	he = kmalloc(sizeof(*he), GFP_NOWAIT);
>> +	if (!he)
>> +		goto out;
>> +
>> +	he->napi_id = napi_id;
>> +	he->timeout = jiffies + NAPI_TIMEOUT;
>> +	hash_add(ctx->napi_ht, &he->node, napi_id);
>> +
>> +	list_add_tail(&he->list, &ctx->napi_list);
>> +
>> +out:
>> +	spin_unlock(&ctx->napi_lock);
>> +}
> 
> What about using GFP_KERNEL to allocate 'he' outside the spin lock, then
> kfree() it in the (he->napi_id == napi_id) path after unlock?

We actually discussed this in previous versions of the series; it kind
of optimizes for the wrong thing. Only the first trip through here
should allocate a 'he' unit, the rest will find it on the hash. That
means that the common case would then alloc+free an extra one,
pointlessly.

> That would make the critical section shorter. Also, GFP_NOWAIT is likely
> to fail under memory pressure.

If a ~48 byte allocation fails, then I suspect we have more serious
issues at hand rather than ignoring NAPI for this socket!
Stefan Roesch April 27, 2023, 4:27 p.m. UTC | #7
Jens Axboe <axboe@kernel.dk> writes:

> On 4/25/23 12:18 PM, Stefan Roesch wrote:
>
> Not too much to complain about, just some minor cleanups that would be
> nice to do.
>
>> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
>> index 1b2a20a42413..2b2ca990ee93 100644
>> --- a/include/linux/io_uring_types.h
>> +++ b/include/linux/io_uring_types.h
>> @@ -277,6 +278,15 @@ struct io_ring_ctx {
>>  	struct xarray		personalities;
>>  	u32			pers_next;
>>
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +	struct list_head	napi_list;	/* track busy poll napi_id */
>> +	spinlock_t		napi_lock;	/* napi_list lock */
>> +
>> +	DECLARE_HASHTABLE(napi_ht, 4);
>> +	unsigned int		napi_busy_poll_to; /* napi busy poll default timeout */
>> +	bool			napi_prefer_busy_poll;
>> +#endif
>> +
>
> I don't mind overly long lines if it's warranted, for a comment it is
> not. This should just go above the variable.
>

Fixed. I was just following what sq_creds was doing a bit earlier in the
file.

>> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
>> index efbd6c9c56e5..fff8f84eb560 100644
>> --- a/io_uring/io_uring.c
>> +++ b/io_uring/io_uring.c
>>  	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
>>  	iowq.timeout = KTIME_MAX;
>>
>> -	if (uts) {
>> -		struct timespec64 ts;
>> +	if (!io_napi(ctx)) {
>> +		if (uts) {
>> +			struct timespec64 ts;
>>
>> -		if (get_timespec64(&ts, uts))
>> -			return -EFAULT;
>> -		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
>> +			if (get_timespec64(&ts, uts))
>> +				return -EFAULT;
>> +			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
>> +		}
>> +	} else {
>> +		if (uts) {
>> +			struct timespec64 ts;
>> +
>> +			if (get_timespec64(&ts, uts))
>> +				return -EFAULT;
>> +
>> +			io_napi_adjust_timeout(ctx, &iowq, &ts);
>> +			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
>> +		} else {
>> +			io_napi_adjust_timeout(ctx, &iowq, NULL);
>> +		}
>> +		io_napi_busy_loop(ctx, &iowq);
>>  	}
>
> This is a little bit of a mess and has a lot of duplication, that is not
> ideal. I'd do something like the end-of-email incremental to avoid that.
> Note that it's totally untested...
>
>>  	trace_io_uring_cqring_wait(ctx, min_events);
>> +
>>  	do {
>>  		unsigned long check_cq;
>>
>
> Spurious line addition here.
>

Fixed.

>> diff --git a/io_uring/napi.c b/io_uring/napi.c
>> new file mode 100644
>> index 000000000000..bb7d2b6b7e90
>> --- /dev/null
>> +++ b/io_uring/napi.c
>> +static inline void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
>> +		unsigned int *new_poll_to)
>> +{
>> +	struct timespec64 pollto = ns_to_timespec64(1000 * (s64)poll_to);
>
> There's a bunch of these, but I'll just mention it here - io_uring
> always just aligns a second line of arguments with the first one. We
> should do that here too.
>

Fixed.

>> +	if (timespec64_compare(ts, &pollto) > 0) {
>> +		*ts = timespec64_sub(*ts, pollto);
>> +		*new_poll_to = poll_to;
>> +	} else {
>> +		u64 to = timespec64_to_ns(ts);
>> +
>> +		do_div(to, 1000);
>
> Is this going to complain on 32-bit?
>

My understanding is this should work on 32-bit.
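
do_div() is the reason it should: a plain 'to / 1000' on a u64 would
typically be emitted as a libgcc 64-bit division call (__udivdi3) that
the kernel doesn't provide on 32-bit, while do_div() divides the u64 in
place (quotient left in 'to', remainder returned) and works on both.
Roughly the pattern used here:

	u64 to = timespec64_to_ns(ts);

	/* convert ns to usec; 'to' now holds the quotient */
	do_div(to, 1000);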

>> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
>> +		struct io_wait_queue *iowq)
>> +{
>> +	unsigned long start_time = busy_loop_current_time();
>> +
>> +	do {
>> +		if (list_is_singular(napi_list))
>> +			break;
>> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>> +			break;
>> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>> +}
>
> Do we need to check for an empty list here?
>
This function is only called from io_cqring_wait() via
io_napi_busy_loop(). In io_cqring_wait() we check that the napi list is
not empty.

>> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
>> +		struct io_wait_queue *iowq)
>> +{
>> +	if (!list_is_singular(napi_list))
>> +		io_napi_multi_busy_loop(napi_list, iowq);
>> +
>> +	if (list_is_singular(napi_list)) {
>> +		struct io_napi_ht_entry *ne;
>> +
>> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
>> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>> +	}
>> +}
>
> Presumably io_napi_multi_busy_loop() can change the state of the list,
> which is why we have if (cond) and then if (!cond) here? Would probably
> warrant a comment as it looks a bit confusing.
>

I added a comment.

>> +/*
>> + * io_napi_adjust_timeout() - Add napi id to the busy poll list
>> + * @ctx: pointer to io-uring context structure
>> + * @iowq: pointer to io wait queue
>> + * @ts: pointer to timespec or NULL
>> + *
>> + * Adjust the busy loop timeout according to timespec and busy poll timeout.
>> + */
>> +void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
>> +		struct timespec64 *ts)
>> +{
>> +	if (ts)
>> +		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
>> +			&iowq->napi_busy_poll_to);
>> +	else
>> +		iowq->napi_busy_poll_to = READ_ONCE(ctx->napi_busy_poll_to);
>> +}
>
> We should probably just pass 'ctx' to adjust_timeout()? Or do
>
> 	unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
>
> at the top and then use that for both. Would get rid of that overly long
> line too.
>
>
I think it makes sense to combine the two functions. I'll also add a
variable at the top of the function like your example above.

> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index a4c9a404f631..390f54c546d6 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -2617,29 +2617,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
>  	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
>  	iowq.timeout = KTIME_MAX;
>
> -	if (!io_napi(ctx)) {
> -		if (uts) {
> -			struct timespec64 ts;
> +	if (uts) {
> +		struct timespec64 ts;
>
> -			if (get_timespec64(&ts, uts))
> -				return -EFAULT;
> -			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> -		}
> -	} else {
> -		if (uts) {
> -			struct timespec64 ts;
> -
> -			if (get_timespec64(&ts, uts))
> -				return -EFAULT;
> -
> -			io_napi_adjust_timeout(ctx, &iowq, &ts);
> -			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> -		} else {
> -			io_napi_adjust_timeout(ctx, &iowq, NULL);
> -		}
> -		io_napi_busy_loop(ctx, &iowq);
> +		if (get_timespec64(&ts, uts))
> +			return -EFAULT;
> +		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> +		io_napi_adjust_timeout(ctx, &iowq, &ts);
>  	}
>
> +	io_napi_busy_loop(ctx, &iowq);
> +
>  	trace_io_uring_cqring_wait(ctx, min_events);
>
>  	do {
> diff --git a/io_uring/napi.c b/io_uring/napi.c
> index ca12ff5f5611..3a0d0317ceec 100644
> --- a/io_uring/napi.c
> +++ b/io_uring/napi.c
> @@ -100,7 +100,8 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
>  	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
>  }
>
> -static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_poll)
> +static bool __io_napi_do_busy_loop(struct list_head *napi_list,
> +				   bool prefer_busy_poll)
>  {
>  	struct io_napi_ht_entry *e;
>  	struct io_napi_ht_entry *n;
> @@ -121,7 +122,7 @@ static void io_napi_multi_busy_loop(struct list_head *napi_list,
>  	do {
>  		if (list_is_singular(napi_list))
>  			break;
> -		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
> +		if (!__io_napi_do_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>  			break;
>  	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>  }
> @@ -251,16 +252,18 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
>  }
>
>  /*
> - * io_napi_adjust_timeout() - Add napi id to the busy poll list
> + * __io_napi_adjust_timeout() - Add napi id to the busy poll list
>   * @ctx: pointer to io-uring context structure
>   * @iowq: pointer to io wait queue
>   * @ts: pointer to timespec or NULL
>   *
>   * Adjust the busy loop timeout according to timespec and busy poll timeout.
>   */
> -void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
> -		struct timespec64 *ts)
> +void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +			      struct io_wait_queue *iowq, struct timespec64 *ts)
>  {
> +	if (!io_napi(ctx))
> +		return;
>  	if (ts)
>  		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
>  			&iowq->napi_busy_poll_to);
> @@ -269,13 +272,13 @@ void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
>  }
>
>  /*
> - * io_napi_busy_loop() - execute busy poll loop
> + * __io_napi_busy_loop() - execute busy poll loop
>   * @ctx: pointer to io-uring context structure
>   * @iowq: pointer to io wait queue
>   *
>   * Execute the busy poll loop and merge the spliced off list.
>   */
> -void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
> +void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
>  {
>  	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
>
> @@ -302,8 +305,8 @@ void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
>   */
>  int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
>  {
> -	int ret = 0;
>  	LIST_HEAD(napi_list);
> +	int ret;
>
>  	if (!READ_ONCE(ctx->napi_busy_poll_to))
>  		return 0;
> @@ -312,9 +315,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
>  	list_splice_init(&ctx->napi_list, &napi_list);
>  	spin_unlock(&ctx->napi_lock);
>
> -	if (__io_napi_busy_loop(&napi_list, ctx->napi_prefer_busy_poll))
> -		ret = 1;
> -
> +	ret = __io_napi_do_busy_loop(&napi_list, ctx->napi_prefer_busy_poll);
>  	io_napi_merge_lists(ctx, &napi_list);
>  	return ret;
>  }
> diff --git a/io_uring/napi.h b/io_uring/napi.h
> index 8da8f032a441..b5e93b3777c0 100644
> --- a/io_uring/napi.h
> +++ b/io_uring/napi.h
> @@ -17,9 +17,9 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
>
>  void __io_napi_add(struct io_ring_ctx *ctx, struct file *file);
>
> -void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
>  		struct io_wait_queue *iowq, struct timespec64 *ts);
> -void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
> +void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
>  int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
>
>  static inline bool io_napi(struct io_ring_ctx *ctx)
> @@ -27,6 +27,23 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
>  	return !list_empty(&ctx->napi_list);
>  }
>
> +static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +					  struct io_wait_queue *iowq,
> +					  struct timespec64 *ts)
> +{
> +	if (!io_napi(ctx))
> +		return;
> +	__io_napi_adjust_timeout(ctx, iowq, ts);
> +}
> +
> +static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
> +				     struct io_wait_queue *iowq)
> +{
> +	if (!io_napi(ctx))
> +		return;
> +	__io_napi_busy_loop(ctx, iowq);
> +}
> +
>  /*
>   * io_napi_add() - Add napi id to the busy poll list
>   * @req: pointer to io_kiocb request
>

I'll have a look at the above proposal.
Stefan Roesch April 27, 2023, 5:34 p.m. UTC | #8
Jens Axboe <axboe@kernel.dk> writes:

> On 4/26/23 7:41 PM, Jens Axboe wrote:
>
> I'd probably also do this:
>
>
> diff --git a/io_uring/napi.c b/io_uring/napi.c
> index ca12ff5f5611..35a29fd9afbc 100644
> --- a/io_uring/napi.c
> +++ b/io_uring/napi.c
> @@ -95,12 +95,17 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
>  {
>  	struct io_wait_queue *iowq = p;
>
> -	return signal_pending(current) ||
> -	       io_should_wake(iowq) ||
> -	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
> +	if (signal_pending(current))
> +		return true;
> +	if (io_should_wake(iowq))
> +		return true;
> +	if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
> +		return true;
> +	return false;
>  }
>
> as that is easier to read.
>
Will be changed in the next version.
Stefan Roesch April 27, 2023, 5:44 p.m. UTC | #9
Jens Axboe <axboe@kernel.dk> writes:

> On 4/26/23 7:41 PM, Jens Axboe wrote:
>>> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
>>> +		struct io_wait_queue *iowq)
>>> +{
>>> +	unsigned long start_time = busy_loop_current_time();
>>> +
>>> +	do {
>>> +		if (list_is_singular(napi_list))
>>> +			break;
>>> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>>> +			break;
>>> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>>> +}
>>
>> Do we need to check for an empty list here?
>>
>>> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
>>> +		struct io_wait_queue *iowq)
>>> +{
>>> +	if (!list_is_singular(napi_list))
>>> +		io_napi_multi_busy_loop(napi_list, iowq);
>>> +
>>> +	if (list_is_singular(napi_list)) {
>>> +		struct io_napi_ht_entry *ne;
>>> +
>>> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>>> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
>>> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>>> +	}
>>> +}
>>
>> Presumably io_napi_multi_busy_loop() can change the state of the list,
>> which is why we have if (cond) and then if (!cond) here? Would probably
>> warrant a comment as it looks a bit confusing.
>
> Doesn't look like that's the case? We just call into
> io_napi_multi_busy_loop() -> napi_busy_loop() which doesn't touch it. So
> the state should be the same?
>
> We also check if the list isn't singular before we call it, and then
> io_napi_multi_busy_loop() breaks out of the loop if it is. And we know
> it's not singular when calling, and I don't see what changes it.
>
> Unless I'm missing something, which is quite possible, this looks overly
> convoluted and has extra pointless checks?

I'll fix it.
Stefan Roesch April 27, 2023, 6:21 p.m. UTC | #10
Jens Axboe <axboe@kernel.dk> writes:

> On 4/26/23 7:50 PM, Jens Axboe wrote:
>> On 4/26/23 7:41 PM, Jens Axboe wrote:
>>>> +static void io_napi_multi_busy_loop(struct list_head *napi_list,
>>>> +		struct io_wait_queue *iowq)
>>>> +{
>>>> +	unsigned long start_time = busy_loop_current_time();
>>>> +
>>>> +	do {
>>>> +		if (list_is_singular(napi_list))
>>>> +			break;
>>>> +		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
>>>> +			break;
>>>> +	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>>>> +}
>>>
>>> Do we need to check for an empty list here?
>>>
>>>> +static void io_napi_blocking_busy_loop(struct list_head *napi_list,
>>>> +		struct io_wait_queue *iowq)
>>>> +{
>>>> +	if (!list_is_singular(napi_list))
>>>> +		io_napi_multi_busy_loop(napi_list, iowq);
>>>> +
>>>> +	if (list_is_singular(napi_list)) {
>>>> +		struct io_napi_ht_entry *ne;
>>>> +
>>>> +		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>>>> +		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
>>>> +			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>>>> +	}
>>>> +}
>>>
>>> Presumably io_napi_multi_busy_loop() can change the state of the list,
>>> which is why we have if (cond) and then if (!cond) here? Would probably
>>> warrant a comment as it looks a bit confusing.
>>
>> Doesn't look like that's the case? We just call into
>> io_napi_multi_busy_loop() -> napi_busy_loop() which doesn't touch it. So
>> the state should be the same?
>>
>> We also check if the list isn't singular before we call it, and then
>> io_napi_multi_busy_loop() breaks out of the loop if it is. And we know
>> it's not singular when calling, and I don't see what changes it.
>>
>> Unless I'm missing something, which is quite possible, this looks overly
>> convoluted and has extra pointless checks?
>
> All the cleanups/fixes I ended up doing are below. Not all for this
> patch probably, just for the series overall. Not tested at all, so
> please just go over them and see what makes sense and let me know which
> hunks you don't agree with.
>
>
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index a4c9a404f631..390f54c546d6 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -2617,29 +2617,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
>  	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
>  	iowq.timeout = KTIME_MAX;
>
> -	if (!io_napi(ctx)) {
> -		if (uts) {
> -			struct timespec64 ts;
> +	if (uts) {
> +		struct timespec64 ts;
>
> -			if (get_timespec64(&ts, uts))
> -				return -EFAULT;
> -			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> -		}
> -	} else {
> -		if (uts) {
> -			struct timespec64 ts;
> -
> -			if (get_timespec64(&ts, uts))
> -				return -EFAULT;
> -
> -			io_napi_adjust_timeout(ctx, &iowq, &ts);
> -			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> -		} else {
> -			io_napi_adjust_timeout(ctx, &iowq, NULL);
> -		}
> -		io_napi_busy_loop(ctx, &iowq);
> +		if (get_timespec64(&ts, uts))
> +			return -EFAULT;
> +		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
> +		io_napi_adjust_timeout(ctx, &iowq, &ts);
>  	}
>
> +	io_napi_busy_loop(ctx, &iowq);
> +
>  	trace_io_uring_cqring_wait(ctx, min_events);
>
>  	do {
> diff --git a/io_uring/napi.c b/io_uring/napi.c
> index ca12ff5f5611..50b2bdb10417 100644
> --- a/io_uring/napi.c
> +++ b/io_uring/napi.c
> @@ -60,8 +60,8 @@ void __io_napi_add(struct io_ring_ctx *ctx, struct file *file)
>  	spin_unlock(&ctx->napi_lock);
>  }
>
> -static inline void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
> -		unsigned int *new_poll_to)
> +static void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
> +			  unsigned int *new_poll_to)
>  {
>  	struct timespec64 pollto = ns_to_timespec64(1000 * (s64)poll_to);
>
> @@ -95,12 +95,17 @@ static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
>  {
>  	struct io_wait_queue *iowq = p;
>
> -	return signal_pending(current) ||
> -	       io_should_wake(iowq) ||
> -	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
> +	if (signal_pending(current))
> +		return true;
> +	if (io_should_wake(iowq))
> +		return true;
> +	if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
> +		return true;
> +	return false;
>  }
>
> -static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_poll)
> +static bool __io_napi_do_busy_loop(struct list_head *napi_list,
> +				   bool prefer_busy_poll)
>  {
>  	struct io_napi_ht_entry *e;
>  	struct io_napi_ht_entry *n;
> @@ -113,38 +118,35 @@ static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_po
>  	return !list_empty(napi_list);
>  }
>
> -static void io_napi_multi_busy_loop(struct list_head *napi_list,
> -		struct io_wait_queue *iowq)
> +static void io_napi_multi_busy_loop(struct list_head *list,
> +				   struct io_wait_queue *iowq)
>  {
>  	unsigned long start_time = busy_loop_current_time();
>
>  	do {
> -		if (list_is_singular(napi_list))
> -			break;
> -		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
> +		if (!__io_napi_do_busy_loop(list, iowq->napi_prefer_busy_poll))
>  			break;
>  	} while (!io_napi_busy_loop_should_end(iowq, start_time));
>  }
>
>  static void io_napi_blocking_busy_loop(struct list_head *napi_list,
> -		struct io_wait_queue *iowq)
> +				       struct io_wait_queue *iowq)
>  {
> -	if (!list_is_singular(napi_list))
> +	if (!list_is_singular(napi_list)) {
>  		io_napi_multi_busy_loop(napi_list, iowq);
> -
> -	if (list_is_singular(napi_list)) {
> +	} else {
>  		struct io_napi_ht_entry *ne;
>
>  		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
>  		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
> -			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
> +				iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
>  	}
>  }
>
>  static void io_napi_remove_stale(struct io_ring_ctx *ctx)
>  {
> -	unsigned int i;
>  	struct io_napi_ht_entry *he;
> +	unsigned int i;
>
>  	hash_for_each(ctx->napi_ht, i, he, node) {
>  		if (time_after(jiffies, he->timeout)) {
> @@ -152,11 +154,10 @@ static void io_napi_remove_stale(struct io_ring_ctx *ctx)
>  			hash_del(&he->node);
>  		}
>  	}
> -
>  }
>
>  static void io_napi_merge_lists(struct io_ring_ctx *ctx,
> -		struct list_head *napi_list)
> +				struct list_head *napi_list)
>  {
>  	spin_lock(&ctx->napi_lock);
>  	list_splice(napi_list, &ctx->napi_list);
> @@ -186,9 +187,9 @@ void io_napi_init(struct io_ring_ctx *ctx)
>   */
>  void io_napi_free(struct io_ring_ctx *ctx)
>  {
> -	unsigned int i;
>  	struct io_napi_ht_entry *he;
>  	LIST_HEAD(napi_list);
> +	unsigned int i;
>
>  	spin_lock(&ctx->napi_lock);
>  	hash_for_each(ctx->napi_ht, i, he, node)
> @@ -206,8 +207,8 @@ void io_napi_free(struct io_ring_ctx *ctx)
>  int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
>  {
>  	const struct io_uring_napi curr = {
> -		.busy_poll_to = ctx->napi_busy_poll_to,
> -		.prefer_busy_poll = ctx->napi_prefer_busy_poll
> +		.busy_poll_to		= ctx->napi_busy_poll_to,
> +		.prefer_busy_poll	= ctx->napi_prefer_busy_poll
>  	};
>  	struct io_uring_napi napi;
>
> @@ -236,14 +237,12 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
>  int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
>  {
>  	const struct io_uring_napi curr = {
> -		.busy_poll_to = ctx->napi_busy_poll_to,
> -		.prefer_busy_poll = ctx->napi_prefer_busy_poll
> +		.busy_poll_to		= ctx->napi_busy_poll_to,
> +		.prefer_busy_poll	= ctx->napi_prefer_busy_poll
>  	};
>
> -	if (arg) {
> -		if (copy_to_user(arg, &curr, sizeof(curr)))
> -			return -EFAULT;
> -	}
> +	if (arg && copy_to_user(arg, &curr, sizeof(curr)))
> +		return -EFAULT;
>
>  	WRITE_ONCE(ctx->napi_busy_poll_to, 0);
>  	WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
> @@ -251,31 +250,36 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
>  }
>
>  /*
> - * io_napi_adjust_timeout() - Add napi id to the busy poll list
> + * __io_napi_adjust_timeout() - Add napi id to the busy poll list
>   * @ctx: pointer to io-uring context structure
>   * @iowq: pointer to io wait queue
>   * @ts: pointer to timespec or NULL
>   *
>   * Adjust the busy loop timeout according to timespec and busy poll timeout.
>   */
> -void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
> -		struct timespec64 *ts)
> +void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +			      struct io_wait_queue *iowq, struct timespec64 *ts)
>  {
> +	unsigned int poll_to;
> +
> +	if (!io_napi(ctx))
> +		return;
> +
> +	poll_to = READ_ONCE(ctx->napi_busy_poll_to);
>  	if (ts)
> -		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
> -			&iowq->napi_busy_poll_to);
> +		adjust_timeout(poll_to, ts, &iowq->napi_busy_poll_to);
>  	else
> -		iowq->napi_busy_poll_to = READ_ONCE(ctx->napi_busy_poll_to);
> +		iowq->napi_busy_poll_to = poll_to;
>  }
>
>  /*
> - * io_napi_busy_loop() - execute busy poll loop
> + * __io_napi_busy_loop() - execute busy poll loop
>   * @ctx: pointer to io-uring context structure
>   * @iowq: pointer to io wait queue
>   *
>   * Execute the busy poll loop and merge the spliced off list.
>   */
> -void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
> +void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
>  {
>  	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
>
> @@ -302,8 +306,8 @@ void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
>   */
>  int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
>  {
> -	int ret = 0;
>  	LIST_HEAD(napi_list);
> +	int ret;
>
>  	if (!READ_ONCE(ctx->napi_busy_poll_to))
>  		return 0;
> @@ -312,9 +316,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
>  	list_splice_init(&ctx->napi_list, &napi_list);
>  	spin_unlock(&ctx->napi_lock);
>
> -	if (__io_napi_busy_loop(&napi_list, ctx->napi_prefer_busy_poll))
> -		ret = 1;
> -
> +	ret = __io_napi_do_busy_loop(&napi_list, ctx->napi_prefer_busy_poll);
>  	io_napi_merge_lists(ctx, &napi_list);
>  	return ret;
>  }
> diff --git a/io_uring/napi.h b/io_uring/napi.h
> index 8da8f032a441..b5e93b3777c0 100644
> --- a/io_uring/napi.h
> +++ b/io_uring/napi.h
> @@ -17,9 +17,9 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
>
>  void __io_napi_add(struct io_ring_ctx *ctx, struct file *file);
>
> -void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
>  		struct io_wait_queue *iowq, struct timespec64 *ts);
> -void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
> +void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
>  int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
>
>  static inline bool io_napi(struct io_ring_ctx *ctx)
> @@ -27,6 +27,23 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
>  	return !list_empty(&ctx->napi_list);
>  }
>
> +static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
> +					  struct io_wait_queue *iowq,
> +					  struct timespec64 *ts)
> +{
> +	if (!io_napi(ctx))
> +		return;
> +	__io_napi_adjust_timeout(ctx, iowq, ts);
> +}
> +
> +static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
> +				     struct io_wait_queue *iowq)
> +{
> +	if (!io_napi(ctx))
> +		return;
> +	__io_napi_busy_loop(ctx, iowq);
> +}
> +
>  /*
>   * io_napi_add() - Add napi id to the busy poll list
>   * @req: pointer to io_kiocb request

Looks good to me, the only difference is to collapse
__io_napi_adjust_timeout() and adjust_timeout() into one function.
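
Untested sketch of what that collapsed function could look like, folding
the adjust_timeout() body into __io_napi_adjust_timeout() with the cached
poll_to from your incremental; the else branch only carries over what is
visible in the quoted snippet:

void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
			      struct io_wait_queue *iowq, struct timespec64 *ts)
{
	unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);

	if (ts) {
		struct timespec64 poll_to_ts =
			ns_to_timespec64(1000 * (s64)poll_to);

		if (timespec64_compare(ts, &poll_to_ts) > 0) {
			*ts = timespec64_sub(*ts, poll_to_ts);
		} else {
			u64 to = timespec64_to_ns(ts);

			/* remaining wait time is shorter, busy poll that long */
			do_div(to, 1000);
			poll_to = to;
		}
	}

	iowq->napi_busy_poll_to = poll_to;
}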
Jens Axboe April 28, 2023, 1:09 a.m. UTC | #11
On 4/27/23 10:27 AM, Stefan Roesch wrote:
>>> +	if (timespec64_compare(ts, &pollto) > 0) {
>>> +		*ts = timespec64_sub(*ts, pollto);
>>> +		*new_poll_to = poll_to;
>>> +	} else {
>>> +		u64 to = timespec64_to_ns(ts);
>>> +
>>> +		do_div(to, 1000);
>>
>> Is this going to complain on 32-bit?
>>
> 
> My understanding is this should work on 32-bit.

Yeah seems fine, I ended up double checking it too.

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 1b2a20a42413..2b2ca990ee93 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -2,6 +2,7 @@ 
 #define IO_URING_TYPES_H
 
 #include <linux/blkdev.h>
+#include <linux/hashtable.h>
 #include <linux/task_work.h>
 #include <linux/bitmap.h>
 #include <linux/llist.h>
@@ -277,6 +278,15 @@  struct io_ring_ctx {
 	struct xarray		personalities;
 	u32			pers_next;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	struct list_head	napi_list;	/* track busy poll napi_id */
+	spinlock_t		napi_lock;	/* napi_list lock */
+
+	DECLARE_HASHTABLE(napi_ht, 4);
+	unsigned int		napi_busy_poll_to; /* napi busy poll default timeout */
+	bool			napi_prefer_busy_poll;
+#endif
+
 	struct {
 		/*
 		 * We cache a range of free CQEs we can use, once exhausted it
diff --git a/io_uring/Makefile b/io_uring/Makefile
index 8cc8e5387a75..2efe7c5f07ba 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -9,3 +9,4 @@  obj-$(CONFIG_IO_URING)		+= io_uring.o xattr.o nop.o fs.o splice.o \
 					sqpoll.o fdinfo.o tctx.o poll.o \
 					cancel.o kbuf.o rsrc.o rw.o opdef.o notif.o
 obj-$(CONFIG_IO_WQ)		+= io-wq.o
+obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index efbd6c9c56e5..fff8f84eb560 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -91,6 +91,7 @@ 
 #include "rsrc.h"
 #include "cancel.h"
 #include "net.h"
+#include "napi.h"
 #include "notif.h"
 
 #include "timeout.h"
@@ -337,6 +338,8 @@  static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_WQ_LIST(&ctx->locked_free_list);
 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
+	io_napi_init(ctx);
+
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
@@ -2614,15 +2617,31 @@  static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 	iowq.timeout = KTIME_MAX;
 
-	if (uts) {
-		struct timespec64 ts;
+	if (!io_napi(ctx)) {
+		if (uts) {
+			struct timespec64 ts;
 
-		if (get_timespec64(&ts, uts))
-			return -EFAULT;
-		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+			if (get_timespec64(&ts, uts))
+				return -EFAULT;
+			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+		}
+	} else {
+		if (uts) {
+			struct timespec64 ts;
+
+			if (get_timespec64(&ts, uts))
+				return -EFAULT;
+
+			io_napi_adjust_timeout(ctx, &iowq, &ts);
+			iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+		} else {
+			io_napi_adjust_timeout(ctx, &iowq, NULL);
+		}
+		io_napi_busy_loop(ctx, &iowq);
 	}
 
 	trace_io_uring_cqring_wait(ctx, min_events);
+
 	do {
 		unsigned long check_cq;
 
@@ -2856,6 +2875,7 @@  static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_req_caches_free(ctx);
 	if (ctx->hash_map)
 		io_wq_put_hash(ctx->hash_map);
+	io_napi_free(ctx);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->dummy_ubuf);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 5f04bd47562a..d669e06f54f0 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -48,6 +48,10 @@  struct io_wait_queue {
 	unsigned nr_timeouts;
 	ktime_t timeout;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int napi_busy_poll_to;
+	bool napi_prefer_busy_poll;
+#endif
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
diff --git a/io_uring/napi.c b/io_uring/napi.c
new file mode 100644
index 000000000000..bb7d2b6b7e90
--- /dev/null
+++ b/io_uring/napi.c
@@ -0,0 +1,243 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+#include "io_uring.h"
+#include "napi.h"
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+/* Timeout for cleanout of stale entries. */
+#define NAPI_TIMEOUT		(60 * SEC_CONVERSION)
+
+struct io_napi_ht_entry {
+	unsigned int		napi_id;
+	struct list_head	list;
+
+	/* Covered by napi lock spinlock.  */
+	unsigned long		timeout;
+	struct hlist_node	node;
+};
+
+void __io_napi_add(struct io_ring_ctx *ctx, struct file *file)
+{
+	unsigned int napi_id;
+	struct socket *sock;
+	struct sock *sk;
+	struct io_napi_ht_entry *he;
+
+	sock = sock_from_file(file);
+	if (!sock)
+		return;
+
+	sk = sock->sk;
+	if (!sk)
+		return;
+
+	napi_id = READ_ONCE(sk->sk_napi_id);
+
+	/* Non-NAPI IDs can be rejected. */
+	if (napi_id < MIN_NAPI_ID)
+		return;
+
+	spin_lock(&ctx->napi_lock);
+	hash_for_each_possible(ctx->napi_ht, he, node, napi_id) {
+		if (he->napi_id == napi_id) {
+			he->timeout = jiffies + NAPI_TIMEOUT;
+			goto out;
+		}
+	}
+
+	he = kmalloc(sizeof(*he), GFP_NOWAIT);
+	if (!he)
+		goto out;
+
+	he->napi_id = napi_id;
+	he->timeout = jiffies + NAPI_TIMEOUT;
+	hash_add(ctx->napi_ht, &he->node, napi_id);
+
+	list_add_tail(&he->list, &ctx->napi_list);
+
+out:
+	spin_unlock(&ctx->napi_lock);
+}
+
+static inline void adjust_timeout(unsigned int poll_to, struct timespec64 *ts,
+		unsigned int *new_poll_to)
+{
+	struct timespec64 pollto = ns_to_timespec64(1000 * (s64)poll_to);
+
+	if (timespec64_compare(ts, &pollto) > 0) {
+		*ts = timespec64_sub(*ts, pollto);
+		*new_poll_to = poll_to;
+	} else {
+		u64 to = timespec64_to_ns(ts);
+
+		do_div(to, 1000);
+		*new_poll_to = to;
+		ts->tv_sec = 0;
+		ts->tv_nsec = 0;
+	}
+}
+
+static inline bool io_napi_busy_loop_timeout(unsigned long start_time,
+		unsigned long bp_usec)
+{
+	if (bp_usec) {
+		unsigned long end_time = start_time + bp_usec;
+		unsigned long now = busy_loop_current_time();
+
+		return time_after(now, end_time);
+	}
+
+	return true;
+}
+
+static bool io_napi_busy_loop_should_end(void *p, unsigned long start_time)
+{
+	struct io_wait_queue *iowq = p;
+
+	return signal_pending(current) ||
+	       io_should_wake(iowq) ||
+	       io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to);
+}
+
+static bool __io_napi_busy_loop(struct list_head *napi_list, bool prefer_busy_poll)
+{
+	struct io_napi_ht_entry *e;
+	struct io_napi_ht_entry *n;
+
+	list_for_each_entry_safe(e, n, napi_list, list) {
+		napi_busy_loop(e->napi_id, NULL, NULL, prefer_busy_poll,
+			       BUSY_POLL_BUDGET);
+	}
+
+	return !list_empty(napi_list);
+}
+
+static void io_napi_multi_busy_loop(struct list_head *napi_list,
+		struct io_wait_queue *iowq)
+{
+	unsigned long start_time = busy_loop_current_time();
+
+	do {
+		if (list_is_singular(napi_list))
+			break;
+		if (!__io_napi_busy_loop(napi_list, iowq->napi_prefer_busy_poll))
+			break;
+	} while (!io_napi_busy_loop_should_end(iowq, start_time));
+}
+
+static void io_napi_blocking_busy_loop(struct list_head *napi_list,
+		struct io_wait_queue *iowq)
+{
+	if (!list_is_singular(napi_list))
+		io_napi_multi_busy_loop(napi_list, iowq);
+
+	if (list_is_singular(napi_list)) {
+		struct io_napi_ht_entry *ne;
+
+		ne = list_first_entry(napi_list, struct io_napi_ht_entry, list);
+		napi_busy_loop(ne->napi_id, io_napi_busy_loop_should_end, iowq,
+			iowq->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
+	}
+}
+
+static void io_napi_remove_stale(struct io_ring_ctx *ctx)
+{
+	unsigned int i;
+	struct io_napi_ht_entry *he;
+
+	hash_for_each(ctx->napi_ht, i, he, node) {
+		if (time_after(jiffies, he->timeout)) {
+			list_del(&he->list);
+			hash_del(&he->node);
+		}
+	}
+
+}
+
+static void io_napi_merge_lists(struct io_ring_ctx *ctx,
+		struct list_head *napi_list)
+{
+	spin_lock(&ctx->napi_lock);
+	list_splice(napi_list, &ctx->napi_list);
+	io_napi_remove_stale(ctx);
+	spin_unlock(&ctx->napi_lock);
+}
+
+/*
+ * io_napi_init() - Init napi settings
+ * @ctx: pointer to io-uring context structure
+ *
+ * Init napi settings in the io-uring context.
+ */
+void io_napi_init(struct io_ring_ctx *ctx)
+{
+	INIT_LIST_HEAD(&ctx->napi_list);
+	spin_lock_init(&ctx->napi_lock);
+	ctx->napi_prefer_busy_poll = false;
+	ctx->napi_busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
+}
+
+/*
+ * io_napi_free() - Deallocate napi
+ * @ctx: pointer to io-uring context structure
+ *
+ * Free the napi list and the hash table in the io-uring context.
+ */
+void io_napi_free(struct io_ring_ctx *ctx)
+{
+	unsigned int i;
+	struct io_napi_ht_entry *he;
+	LIST_HEAD(napi_list);
+
+	spin_lock(&ctx->napi_lock);
+	hash_for_each(ctx->napi_ht, i, he, node)
+		hash_del(&he->node);
+	spin_unlock(&ctx->napi_lock);
+}
+
+/*
+ * io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * @ctx: pointer to io-uring context structure
+ * @iowq: pointer to io wait queue
+ * @ts: pointer to timespec or NULL
+ *
+ * Adjust the busy loop timeout according to timespec and busy poll timeout.
+ */
+void io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
+		struct timespec64 *ts)
+{
+	if (ts)
+		adjust_timeout(READ_ONCE(ctx->napi_busy_poll_to), ts,
+			&iowq->napi_busy_poll_to);
+	else
+		iowq->napi_busy_poll_to = READ_ONCE(ctx->napi_busy_poll_to);
+}
+
+/*
+ * io_napi_busy_loop() - execute busy poll loop
+ * @ctx: pointer to io-uring context structure
+ * @iowq: pointer to io wait queue
+ *
+ * Execute the busy poll loop and merge the spliced off list.
+ */
+void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+{
+	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+
+	/* SQPOLL is handled in sqthread. */
+	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+		LIST_HEAD(napi_list);
+
+		spin_lock(&ctx->napi_lock);
+		list_splice_init(&ctx->napi_list, &napi_list);
+		spin_unlock(&ctx->napi_lock);
+
+		if (iowq->napi_busy_poll_to)
+			io_napi_blocking_busy_loop(&napi_list, iowq);
+
+		io_napi_merge_lists(ctx, &napi_list);
+	}
+}
+
+#endif
diff --git a/io_uring/napi.h b/io_uring/napi.h
new file mode 100644
index 000000000000..49322a16b6e5
--- /dev/null
+++ b/io_uring/napi.h
@@ -0,0 +1,66 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef IOU_NAPI_H
+#define IOU_NAPI_H
+
+#include <linux/kernel.h>
+#include <linux/io_uring.h>
+#include <net/busy_poll.h>
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+void io_napi_init(struct io_ring_ctx *ctx);
+void io_napi_free(struct io_ring_ctx *ctx);
+
+void __io_napi_add(struct io_ring_ctx *ctx, struct file *file);
+
+void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
+		struct io_wait_queue *iowq, struct timespec64 *ts);
+void io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
+
+static inline bool io_napi(struct io_ring_ctx *ctx)
+{
+	return !list_empty(&ctx->napi_list);
+}
+
+/*
+ * io_napi_add() - Add napi id to the busy poll list
+ * @req: pointer to io_kiocb request
+ *
+ * Add the napi id of the socket to the napi busy poll list and hash table.
+ */
+static inline void io_napi_add(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!READ_ONCE(ctx->napi_busy_poll_to))
+		return;
+
+	__io_napi_add(ctx, req->file);
+}
+
+#else
+
+static inline void io_napi_init(struct io_ring_ctx *ctx)
+{
+}
+
+static inline void io_napi_free(struct io_ring_ctx *ctx)
+{
+}
+
+static inline bool io_napi(struct io_ring_ctx *ctx)
+{
+	return false;
+}
+
+static inline void io_napi_add(struct io_kiocb *req)
+{
+}
+
+#define io_napi_adjust_timeout(ctx, iowq, ts) do {} while (0)
+#define io_napi_busy_loop(ctx, iowq) do {} while (0)
+
+#endif
+
+#endif
diff --git a/io_uring/poll.c b/io_uring/poll.c
index c90e47dc1e29..0284849793bb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -15,6 +15,7 @@ 
 
 #include "io_uring.h"
 #include "refs.h"
+#include "napi.h"
 #include "opdef.h"
 #include "kbuf.h"
 #include "poll.h"
@@ -631,6 +632,7 @@  static int __io_arm_poll_handler(struct io_kiocb *req,
 		__io_poll_execute(req, mask);
 		return 0;
 	}
+	io_napi_add(req);
 
 	if (ipt->owning) {
 		/*