@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +87,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -278,14 +280,23 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
- dma_addr_t dma;
- struct page *page;
+ union {
+ struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
+ __u32 page_offset;
#else
- __u16 page_offset;
+ __u16 page_offset;
#endif
- __u16 pagecnt_bias;
+ __u16 pagecnt_bias;
+ };
+ struct {
+ struct xdp_buff *xdp;
+ };
+ };
+
};
struct igb_tx_queue_stats {
@@ -357,6 +368,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -385,7 +397,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
- IGB_RING_FLAG_TX_DISABLED
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_AF_XDP_ZC
};
#define ring_uses_large_buffer(ring) \
@@ -823,4 +836,13 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, u16 count);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, const int budget);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
@@ -2013,7 +2013,9 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2931,9 +2933,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -3010,6 +3017,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3060,6 +3070,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
};
/**
@@ -3356,7 +3367,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4734,12 +4746,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4771,8 +4788,16 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ ring->xsk_pool = igb_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4892,7 +4917,9 @@ void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
* at least 1 descriptor unused to make sure
* next_to_use != next_to_clean
*/
- igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
}
@@ -5069,6 +5096,13 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_ring->xsk_pool) {
+ if (buffer_info->xdp)
+ xsk_buff_free(buffer_info->xdp);
+ buffer_info->xdp = NULL;
+ goto skip_for_xsk;
+ }
+
/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
@@ -5087,6 +5121,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
__page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias);
+skip_for_xsk:
i++;
if (i == rx_ring->count)
i = 0;
@@ -6518,6 +6553,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -8260,7 +8298,9 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned = q_vector->rx.ring->xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8668,8 +8708,13 @@ struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (err)
+ if (err) {
+ if (rx_ring->xsk_pool &&
+ xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
goto out_failure;
+ }
result = IGB_XDP_REDIR;
break;
default:
@@ -8929,6 +8974,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
@@ -9059,7 +9106,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
}
u64_stats_update_begin(&rx_ring->rx_syncp);
new file mode 100644
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+
+ if (!igb_xdp_is_enabled(adapter) ||
+ !test_bit(IGB_RING_FLAG_AF_XDP_ZC, &ring->flags))
+ return NULL;
+
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ set_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+
+ if (if_running) {
+ igb_txrx_ring_enable(adapter, qid);
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ if (err) {
+ clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *rx_ring;
+ struct xsk_buff_pool *pool;
+ bool if_running;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+
+ if (if_running)
+ igb_txrx_ring_enable(adapter, qid);
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, u16 count)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *buffer_info;
+ u16 i = rx_ring->next_to_use;
+ dma_addr_t dma;
+ bool ok = true;
+
+ /* nothing to do */
+ if (!count)
+ return true;
+
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ buffer_info->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
+ if (!buffer_info->xdp) {
+ ok = false;
+ break;
+ }
+
+ dma = xsk_buff_xdp_get_dma(buffer_info->xdp);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+
+ rx_desc++;
+ buffer_info++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ buffer_info = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ count--;
+ } while (count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+
+ return ok;
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static void igb_update_ntc(struct igb_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb;
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer;
+ ktime_t timestamp = 0;
+ unsigned int size;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ rx_buffer->xdp->data_end = rx_buffer->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(rx_buffer->xdp, rx_ring->xsk_pool);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ rx_buffer->xdp->data,
+ &timestamp);
+
+ rx_buffer->xdp->data += ts_hdr_len;
+ rx_buffer->xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ skb = igb_run_xdp(adapter, rx_ring, rx_buffer->xdp);
+
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(rx_buffer->xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+
+ rx_buffer->xdp = NULL;
+ cleaned_count++;
+ igb_update_ntc(rx_ring);
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, rx_buffer->xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ break;
+ }
+
+ xsk_buff_free(rx_buffer->xdp);
+ rx_buffer->xdp = NULL;
+ cleaned_count++;
+ igb_update_ntc(rx_ring);
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ if (xdp_xmit & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
+
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct igb_ring *ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!ring->xsk_pool)
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
Add support for the AF_XDP zero-copy receive path.

Add an IGB_RING_FLAG_AF_XDP_ZC ring flag to indicate that a ring has
AF_XDP zero-copy support. The flag is used in igb_configure_rx_ring()
to register the XSK_BUFF_POOL memory model when zero-copy is enabled,
or the default PAGE_SHARED model otherwise.

When AF_XDP zero-copy is enabled, the Rx buffers are allocated from the
xsk buff pool using igb_alloc_rx_buffers_zc().

Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
 drivers/net/ethernet/intel/igb/Makefile   |   2 +-
 drivers/net/ethernet/intel/igb/igb.h      |  34 +-
 drivers/net/ethernet/intel/igb/igb_main.c |  72 ++++-
 drivers/net/ethernet/intel/igb/igb_xsk.c  | 373 ++++++++++++++++++++++
 4 files changed, 463 insertions(+), 18 deletions(-)
 create mode 100644 drivers/net/ethernet/intel/igb/igb_xsk.c
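
Note (not part of the patch): the sketch below shows how this Rx path is
typically exercised from userspace, assuming libxdp's <xdp/xsk.h> helpers
are available. The interface name "eth2", queue 0, and the frame/ring
sizes are placeholders. Binding with XDP_ZEROCOPY only succeeds once the
driver advertises NETDEV_XDP_ACT_XSK_ZEROCOPY as done above; frames posted
to the fill ring are what igb_alloc_rx_buffers_zc() hands to hardware, and
poll() on the socket ends up in igb_xsk_wakeup() when need_wakeup is set.

/* xsk_rx_demo.c - hypothetical AF_XDP zero-copy Rx demo, not part of this patch */
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <linux/if_xdp.h>
#include <xdp/xsk.h>		/* libxdp; older setups ship this as <bpf/xsk.h> */

#define NUM_FRAMES	XSK_RING_PROD__DEFAULT_NUM_DESCS	/* 2048 frames */
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE		/* 4096 bytes */

int main(void)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* ask for zero-copy explicitly; bind fails if unsupported */
		.bind_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};
	struct xsk_ring_prod fq;	/* fill ring: buffers given to the driver */
	struct xsk_ring_cons rx;	/* rx ring: completed frames */
	struct xsk_ring_cons cq;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	__u32 idx, i;
	void *bufs;
	int ret;

	/* UMEM backing memory, page aligned */
	bufs = mmap(NULL, (size_t)NUM_FRAMES * FRAME_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (bufs == MAP_FAILED)
		return 1;

	if (xsk_umem__create(&umem, bufs, (__u64)NUM_FRAMES * FRAME_SIZE,
			     &fq, &cq, NULL))
		return 1;

	/* "eth2"/queue 0 are placeholders for an igb port and Rx queue */
	ret = xsk_socket__create(&xsk, "eth2", 0, umem, &rx, NULL, &cfg);
	if (ret) {
		fprintf(stderr, "bind failed: %s\n", strerror(-ret));
		return 1;
	}

	/* Post every frame to the fill ring; igb_alloc_rx_buffers_zc()
	 * pulls its Rx buffers from this pool via xsk_buff_alloc().
	 */
	if (xsk_ring_prod__reserve(&fq, NUM_FRAMES, &idx) != NUM_FRAMES)
		return 1;
	for (i = 0; i < NUM_FRAMES; i++)
		*xsk_ring_prod__fill_addr(&fq, idx + i) = (__u64)i * FRAME_SIZE;
	xsk_ring_prod__submit(&fq, NUM_FRAMES);

	for (;;) {
		struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };
		__u32 idx_rx, idx_fq;
		unsigned int rcvd;

		/* With XDP_USE_NEED_WAKEUP the driver asks to be kicked when
		 * idle; poll() triggers ndo_xsk_wakeup (igb_xsk_wakeup).
		 */
		poll(&pfd, 1, -1);

		rcvd = xsk_ring_cons__peek(&rx, 64, &idx_rx);
		if (!rcvd)
			continue;

		while (xsk_ring_prod__reserve(&fq, rcvd, &idx_fq) != rcvd)
			; /* fill ring momentarily full; retry */

		for (i = 0; i < rcvd; i++) {
			const struct xdp_desc *d = xsk_ring_cons__rx_desc(&rx, idx_rx + i);

			printf("frame @0x%llx len %u\n",
			       (unsigned long long)d->addr, d->len);
			/* recycle: mask off the headroom offset to get the frame base */
			*xsk_ring_prod__fill_addr(&fq, idx_fq + i) =
				d->addr & ~((__u64)FRAME_SIZE - 1);
		}
		xsk_ring_cons__release(&rx, rcvd);
		xsk_ring_prod__submit(&fq, rcvd);
	}
}

Build is assumed to be along the lines of
"gcc -O2 -o xsk_rx_demo xsk_rx_demo.c -lxdp -lbpf"; the exact link flags
depend on how libxdp/libbpf are installed.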