@@ -260,18 +260,14 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 	s16 offset;
 	int err;
 
-	rcu_read_lock();
-	xdp_prog = rcu_dereference(efx->xdp_prog);
-	if (!xdp_prog) {
-		rcu_read_unlock();
+	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
+	if (!xdp_prog)
 		return true;
-	}
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
 	if (unlikely(channel->rx_pkt_n_frags > 1)) {
 		/* We can't do XDP on fragmented packets - drop. */
-		rcu_read_unlock();
 		efx_free_rx_buffers(rx_queue, rx_buf,
 				    channel->rx_pkt_n_frags);
 		if (net_ratelimit())
@@ -295,8 +291,10 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
 			 rx_buf->len, false);
 
+	/* This code is invoked within a single NAPI poll cycle and thus under
+	 * local_bh_disable(), which provides the needed RCU protection.
+	 */
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
-	rcu_read_unlock();
 
 	offset = (u8 *)xdp.data - *ehp;
 
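For context (not part of the patch): the BH-protected read above only works because the writer side publishes efx->xdp_prog via RCU as well. Below is a minimal illustrative sketch of such a writer; the function name efx_xdp_set_prog and the RTNL-based locking are assumptions for the example, not taken from this patch.

/* Illustrative writer-side sketch, not part of this patch.  Swap in a new
 * XDP program under the RTNL lock and drop the reference to the old one.
 * Readers in NAPI poll run with bottom halves disabled, and bpf_prog_put()
 * defers the final free of a non-sleepable program behind an RCU grace
 * period, so a reader that already fetched old_prog remains safe.
 */
static int efx_xdp_set_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;

	ASSERT_RTNL();

	old_prog = rtnl_dereference(efx->xdp_prog);
	rcu_assign_pointer(efx->xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}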