@@ -47,6 +47,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time);
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_execute(unsigned int napi_id, void (*cb)(void *), void *cb_arg);
void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
@@ -66,6 +67,11 @@ static inline bool sk_can_busy_loop(struct sock *sk)
return false;
}
+static inline void napi_execute(unsigned int napi_id,
+ void (*cb)(void *), void *cb_arg)
+{
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long busy_loop_current_time(void)
@@ -6351,6 +6351,30 @@ enum {
NAPI_F_END_ON_RESCHED = 2,
};
+static inline bool napi_state_start_busy_polling(struct napi_struct *napi,
+ unsigned flags)
+{
+ unsigned long val = READ_ONCE(napi->state);
+
+ /* If multiple threads are competing for this napi,
+ * we avoid dirtying napi->state as much as we can.
+ */
+ if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
+ NAPIF_STATE_IN_BUSY_POLL))
+ goto fail;
+
+ if (cmpxchg(&napi->state, val,
+ val | NAPIF_STATE_IN_BUSY_POLL |
+ NAPIF_STATE_SCHED) != val)
+ goto fail;
+
+ return true;
+fail:
+ if (flags & NAPI_F_PREFER_BUSY_POLL)
+ set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
+ return false;
+}
+
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
unsigned flags, u16 budget)
{
@@ -6426,24 +6450,8 @@ static void __napi_busy_loop(unsigned int napi_id,
local_bh_disable();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
if (!napi_poll) {
- unsigned long val = READ_ONCE(napi->state);
-
- /* If multiple threads are competing for this napi,
- * we avoid dirtying napi->state as much as we can.
- */
- if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
- NAPIF_STATE_IN_BUSY_POLL)) {
- if (flags & NAPI_F_PREFER_BUSY_POLL)
- set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
+ if (!napi_state_start_busy_polling(napi, flags))
goto count;
- }
- if (cmpxchg(&napi->state, val,
- val | NAPIF_STATE_IN_BUSY_POLL |
- NAPIF_STATE_SCHED) != val) {
- if (flags & NAPI_F_PREFER_BUSY_POLL)
- set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
- goto count;
- }
have_poll_lock = netpoll_poll_lock(napi);
napi_poll = napi->poll;
}
@@ -6507,6 +6515,54 @@ void napi_busy_loop(unsigned int napi_id,
}
EXPORT_SYMBOL(napi_busy_loop);
+/**
+ * napi_execute - claim a napi context and run a callback under its poll lock
+ * @napi_id: id of the napi context to look up (via napi_by_id())
+ * @cb: callback to invoke once the napi is claimed for busy polling
+ * @cb_arg: opaque argument passed to @cb
+ *
+ * Busy-spins with BH disabled until the napi can be claimed (SCHED |
+ * IN_BUSY_POLL) or a reschedule is pending; returns silently if @napi_id
+ * does not resolve to a napi instance.
+ */
+void napi_execute(unsigned int napi_id,
+ void (*cb)(void *), void *cb_arg)
+{
+ unsigned flags = NAPI_F_PREFER_BUSY_POLL;
+ void *have_poll_lock = NULL;
+ struct napi_struct *napi;
+
+ rcu_read_lock();
+ napi = napi_by_id(napi_id);
+ if (!napi) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
+ for (;;) {
+ local_bh_disable();
+
+ if (napi_state_start_busy_polling(napi, flags)) {
+ have_poll_lock = netpoll_poll_lock(napi);
+ cb(cb_arg);
+ local_bh_enable();
+ busy_poll_stop(napi, have_poll_lock, flags, 1);
+ break;
+ }
+
+ local_bh_enable();
+ if (unlikely(need_resched()))
+ break;
+ cpu_relax();
+ }
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ rcu_read_unlock();
+}
+
void napi_suspend_irqs(unsigned int napi_id)
{
struct napi_struct *napi;