--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1595,6 +1595,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
res_budget = budget;
xdp.rxq = &rx_ring->xdp_rxq;
xdp.frame_sz = ENA_PAGE_SIZE;
+ xdp.mb = 0;
do {
xdp_verdict = XDP_PASS;
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -139,6 +139,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp.data_end = *data_ptr + *len;
xdp.rxq = &rxr->xdp_rxq;
xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp.mb = 0;
orig_data = xdp.data;
rcu_read_lock();
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -553,6 +553,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp.data_end = xdp.data + len;
xdp.rxq = &rq->xdp_rxq;
xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
+ xdp.mb = 0;
orig_data = xdp.data;
rcu_read_lock();
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -366,6 +366,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
(dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ xdp.mb = 0;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2332,6 +2332,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.mb = 0;
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1089,6 +1089,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
#if (PAGE_SIZE < 8192)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp.mb = 0;
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2298,6 +2298,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#if (PAGE_SIZE < 8192)
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp.mb = 0;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1129,6 +1129,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.mb = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3558,6 +3558,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
xdp.data_end = xdp.data + rx_bytes;
xdp.frame_sz = PAGE_SIZE;
+ xdp.mb = 0;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
xdp.rxq = &rxq->xdp_rxq_short;
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -684,6 +684,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
xdp.frame_sz = priv->frag_info[0].frag_stride;
+ xdp.mb = 0;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1133,6 +1133,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
xdp->data_end = xdp->data + len;
xdp->rxq = &rq->xdp_rxq;
xdp->frame_sz = rq->buff.frame0_sz;
+ xdp->mb = 0;
}
static struct sk_buff *
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1824,6 +1824,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.mb = 0;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1096,6 +1096,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp.data_end = xdp.data + *len;
xdp.rxq = &rxq->xdp_rxq;
xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
+ xdp.mb = 0;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -301,6 +301,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp.data_end = xdp.data + rx_buf->len;
xdp.rxq = &rx_queue->xdp_rxq_info;
xdp.frame_sz = efx->rx_page_buf_step;
+ xdp.mb = 0;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -947,6 +947,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp.rxq = &dring->xdp_rxq;
xdp.frame_sz = PAGE_SIZE;
+ xdp.mb = 0;
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -407,6 +407,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
xdp.frame_sz = PAGE_SIZE;
+ xdp.mb = 0;
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -350,6 +350,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
xdp.frame_sz = PAGE_SIZE;
+ xdp.mb = 0;
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -50,6 +50,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
xdp->data_end = xdp->data + len;
xdp->rxq = &nvchan->xdp_rxq;
xdp->frame_sz = PAGE_SIZE;
+ xdp->mb = 0;
memcpy(xdp->data, data, len);
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,6 +1641,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp.data_end = xdp.data + len;
xdp.rxq = &tfile->xdp_rxq;
xdp.frame_sz = buflen;
+ xdp.mb = 0;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -2388,6 +2389,7 @@ static int tun_xdp_one(struct tun_struct *tun,
xdp_set_data_meta_invalid(xdp);
xdp->rxq = &tfile->xdp_rxq;
xdp->frame_sz = buflen;
+ xdp->mb = 0;
act = bpf_prog_run_xdp(xdp_prog, xdp);
err = tun_xdp_act(tun, xdp_prog, xdp, act);
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -711,6 +711,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
/* SKB "head" area always have tailroom for skb_shared_info */
xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp.mb = 0;
orig_data = xdp.data;
orig_data_end = xdp.data_end;
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -690,6 +690,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
xdp.frame_sz = buflen;
+ xdp.mb = 0;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
@@ -860,6 +861,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
xdp.frame_sz = frame_sz - vi->hdr_len;
+ xdp.mb = 0;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -870,6 +870,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
xdp->data_end = xdp->data + len;
xdp->rxq = &queue->xdp_rxq;
xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+ xdp->mb = 0;
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4640,6 +4640,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* SKB "head" area always have tailroom for skb_shared_info */
xdp->frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
xdp->frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp->mb = 0;
orig_data_end = xdp->data_end;
orig_data = xdp->data;
Initialize multi-buffer bit (mb) to 0 in all XDP-capable drivers.
This is a preliminary patch to enable xdp multi-buffer support.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c         | 1 +
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c        | 1 +
 drivers/net/ethernet/cavium/thunder/nicvf_main.c     | 1 +
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c     | 1 +
 drivers/net/ethernet/intel/i40e/i40e_txrx.c          | 1 +
 drivers/net/ethernet/intel/ice/ice_txrx.c            | 1 +
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c        | 1 +
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c    | 1 +
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c      | 1 +
 drivers/net/ethernet/mellanox/mlx4/en_rx.c           | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c      | 1 +
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c  | 1 +
 drivers/net/ethernet/qlogic/qede/qede_fp.c           | 1 +
 drivers/net/ethernet/sfc/rx.c                        | 1 +
 drivers/net/ethernet/socionext/netsec.c              | 1 +
 drivers/net/ethernet/ti/cpsw.c                       | 1 +
 drivers/net/ethernet/ti/cpsw_new.c                   | 1 +
 drivers/net/hyperv/netvsc_bpf.c                      | 1 +
 drivers/net/tun.c                                    | 2 ++
 drivers/net/veth.c                                   | 1 +
 drivers/net/virtio_net.c                             | 2 ++
 drivers/net/xen-netfront.c                           | 1 +
 net/core/dev.c                                       | 1 +
 23 files changed, 25 insertions(+)
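For context, a minimal sketch of what these one-line changes rely on; it is not
part of this patch, and the exact field type, its placement in struct xdp_buff,
and the helper name example_init_xdp_buff below are assumptions. The series
presumably extends struct xdp_buff in include/net/xdp.h with a single "mb" bit,
and each driver clears it once per received frame before the XDP program runs,
so later multi-buffer-aware code never sees an uninitialized bit:

/* Assumed struct xdp_buff extension (sketch); other members and the real
 * declaration in include/net/xdp.h may differ.
 */
struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	u32 frame_sz;	/* frame size to deduce data_hard_end/reserved tailroom */
	u32 mb:1;	/* set when the frame spans multiple buffers */
};

/* Hypothetical helper illustrating the per-frame init pattern the hunks above
 * converge on; today each driver open-codes these assignments on its Rx path.
 */
static inline void example_init_xdp_buff(struct xdp_buff *xdp,
					 void *hard_start, void *data,
					 unsigned int len,
					 struct xdp_rxq_info *rxq,
					 u32 frame_sz)
{
	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + len;
	xdp->data_meta = data;
	xdp->rxq = rxq;
	xdp->frame_sz = frame_sz;
	xdp->mb = 0;	/* single buffer until a later patch in the series sets it */
}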