
[RFC,2/2] vmxnet3: Add XDP_REDIRECT support.

Message ID 20220629014927.2123-2-u9012063@gmail.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series [RFC,1/2] vmxnet3: Add basic XDP support.

Checks

Context Check Description
netdev/tree_selection success Guessed tree name to be net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 16 this patch: 16
netdev/cc_maintainers warning 15 maintainers not CCed: kafai@fb.com daniel@iogearbox.net songliubraving@fb.com ast@kernel.org bpf@vger.kernel.org hawk@kernel.org pabeni@redhat.com davem@davemloft.net edumazet@google.com kuba@kernel.org pv-drivers@vmware.com john.fastabend@gmail.com yhs@fb.com andrii@kernel.org kpsingh@kernel.org
netdev/build_clang fail Errors and warnings before: 0 this patch: 2
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 16 this patch: 16
netdev/checkpatch warning WARNING: 'acess' may be misspelled - perhaps 'access'? WARNING: braces {} are not necessary for single statement blocks WARNING: line length of 83 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

William Tu June 29, 2022, 1:49 a.m. UTC
The patch adds XDP_REDIRECT support for vmxnet3. When the XDP program
returns XDP_REDIRECT, a new page is allocated and the packet contents are
copied into it, so that the original rx buffer can afterwards be
unmapped/freed without any issue.
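
In rough outline, the redirect path added below (a condensed view of the
vmxnet3_run_xdp() and vmxnet3_rq_rx_complete() hunks, with error handling
and stats trimmed) is:

	case XDP_REDIRECT:
		/* Copy the frame into its own page so the original
		 * rx buffer can be unmapped/recycled right away.
		 */
		page = alloc_page(GFP_ATOMIC);
		xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
		xdp_prepare_buff(&xdp, page_address(page),
				 XDP_PACKET_HEADROOM, skb->len, false);
		memcpy(xdp.data, skb->data, skb->len);
		xdp_do_redirect(rq->adapter->netdev, &xdp, rq->xdp_bpf_prog);
		*need_xdp_flush = true;
		break;

	/* ...and once the rx completion loop is done: */
	if (need_xdp_flush)
		xdp_do_flush_map();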

Tested the patch using two VMs: one runs DPDK pktgen, and the other
runs:
$ ./samples/bpf/xdp_redirect_cpu -d ens160 -c 0 -e drop
$ ./samples/bpf/xdp_redirect_cpu -d ens160 -c 0 -e pass
$ ./samples/bpf/xdp_redirect_cpu -d ens160 -c 0 -r ens160
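
For reference (not part of this patch), the redirect programs loaded by
these samples boil down to something like the devmap-based program below;
the map name tx_port and the function name are made up for illustration.
xdp_redirect_cpu itself redirects into a cpumap, but from the driver's
point of view both go through xdp_do_redirect().

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 1);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	/* Redirect every frame to the ifindex stored at key 0;
	 * with flags == 0 a failed lookup returns XDP_ABORTED.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";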

Results:
XDP_DROP  385K pkt/s
XDP_PASS  351K pkt/s
TX	  21K  pkt/s (not sure if something is wrong here)

Feedback needed:
a. I have to allocate a new page before calling xdp_do_redirect.
   Without doing so, I got several issues such as OOM, CPU soft lockup,
   and invalid page access. I'm still trying to fix this.
b. I don't know whether these performance numbers make sense or not.

Signed-off-by: William Tu <u9012063@gmail.com>
---
 drivers/net/vmxnet3/vmxnet3_drv.c     | 68 +++++++++++++++++++++++++--
 drivers/net/vmxnet3/vmxnet3_ethtool.c |  4 ++
 drivers/net/vmxnet3/vmxnet3_int.h     |  3 ++
 3 files changed, 72 insertions(+), 3 deletions(-)

Patch

diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 549e31a1d485..fa8bff86f55f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1402,6 +1402,46 @@  vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
 		       struct sk_buff *skb,
 		       struct vmxnet3_tx_queue *tq);
 
+static int
+vmxnet3_xdp_xmit(struct net_device *dev,
+		 int n, struct xdp_frame **frames, u32 flags)
+{
+	struct vmxnet3_adapter *adapter;
+	struct vmxnet3_tx_queue *tq;
+	struct netdev_queue *nq;
+	int i, err, cpu;
+	int tq_number;
+	int nxmit_byte = 0, nxmit = 0;
+
+	adapter = netdev_priv(dev);
+
+	if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
+		return -ENETDOWN;
+	if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
+		return -EINVAL;
+
+	tq_number = adapter->num_tx_queues;
+	cpu = smp_processor_id();
+	tq = &adapter->tx_queue[cpu % tq_number];
+	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+	__netif_tx_lock(nq, cpu);
+	for (i = 0; i < n; i++) {
+		err = vmxnet3_xdp_xmit_frame(adapter, frames[i], NULL, tq);
+		if (err) {
+			tq->stats.xdp_xmit_err++;
+			break;
+		}
+		nxmit_byte += frames[i]->len;
+		nxmit++;
+	}
+
+	tq->stats.xdp_xmit += nxmit;
+	__netif_tx_unlock(nq);
+
+	return nxmit;
+}
+
 static int
 vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
 		      struct xdp_frame *xdpf,
@@ -1513,8 +1553,10 @@  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct sk_buff *skb,
 	int headroom = XDP_PACKET_HEADROOM;
 	struct xdp_frame *xdpf;
 	struct xdp_buff xdp;
+	struct page *page;
 	void *orig_data;
 	void *buf_hard_start;
+	int err;
 	u32 act;
 
 	buf_hard_start = skb->data - headroom;
@@ -1557,13 +1599,30 @@  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct sk_buff *skb,
 		ctx->skb = NULL;
 		break;
 	case XDP_ABORTED:
-		ctx->skb = NULL;
 		trace_xdp_exception(rq->adapter->netdev, rq->xdp_bpf_prog, act);
 		rq->stats.xdp_aborted++;
+		ctx->skb = NULL;
 		break;
-	case XDP_REDIRECT: /* Not Supported. */
+	case XDP_REDIRECT:
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			return XDP_DROP;
+		}
+		xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
+		xdp_prepare_buff(&xdp, page_address(page), headroom, skb->len, false);
+		memcpy(xdp.data, skb->data, skb->len);
+		err = xdp_do_redirect(rq->adapter->netdev, &xdp, rq->xdp_bpf_prog);
+		if (!err) {
+			rq->stats.xdp_redirects++;
+			dev_kfree_skb(ctx->skb);
+		} else {
+			__free_page(page);
+			dev_kfree_skb(ctx->skb);
+			rq->stats.xdp_drops++;
+		}
 		ctx->skb = NULL;
-		fallthrough;
+		*need_xdp_flush = true;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rq->adapter->netdev,
 					    rq->xdp_bpf_prog, act);
@@ -1943,6 +2002,8 @@  vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		vmxnet3_getRxComp(rcd,
 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
+	if (need_xdp_flush)
+		xdp_do_flush_map();
 
 	return num_pkts;
 }
@@ -3937,6 +3998,7 @@  vmxnet3_probe_device(struct pci_dev *pdev,
 		.ndo_poll_controller = vmxnet3_netpoll,
 #endif
 		.ndo_bpf = vmxnet3_xdp,
+		.ndo_xdp_xmit = vmxnet3_xdp_xmit,
 	};
 	int err;
 	u32 ver;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 5574c18c0727..b93dab2056e2 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -76,6 +76,10 @@  vmxnet3_tq_driver_stats[] = {
 					 copy_skb_header) },
 	{ "  giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
 					 oversized_hdr) },
+	{ "  xdp xmit",		offsetof(struct vmxnet3_tq_driver_stats,
+					 xdp_xmit) },
+	{ "  xdp xmit err",	offsetof(struct vmxnet3_tq_driver_stats,
+					 xdp_xmit_err) },
 };
 
 /* per rq stats maintained by the device */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 0f3b243302e4..9b8020f08e4c 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -219,6 +219,9 @@  struct vmxnet3_tq_driver_stats {
 	u64 linearized;         /* # of pkts linearized */
 	u64 copy_skb_header;    /* # of times we have to copy skb header */
 	u64 oversized_hdr;
+
+	u64 xdp_xmit;
+	u64 xdp_xmit_err;
 };
 
 struct vmxnet3_tx_ctx {