
[bpf-next,v2,4/4] selftests/bpf: Add selftest for bpf_xdp_flow_offload_lookup kfunc

Message ID 46168ffdeb2b57b4331c75f95d27b747c2ccc517.1716026761.git.lorenzo@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Series: netfilter: Add the capability to offload flowtable in XDP layer

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/apply fail Patch does not apply to bpf-next-0

Commit Message

Lorenzo Bianconi May 18, 2024, 10:12 a.m. UTC
Introduce an e2e selftest for the bpf_xdp_flow_offload_lookup kfunc through
the xdp_flowtable utility.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 tools/testing/selftests/bpf/Makefile          |  10 +-
 tools/testing/selftests/bpf/config            |   4 +
 .../selftests/bpf/progs/xdp_flowtable.c       | 141 +++++++++++++++++
 .../selftests/bpf/test_xdp_flowtable.sh       | 112 ++++++++++++++
 tools/testing/selftests/bpf/xdp_flowtable.c   | 142 ++++++++++++++++++
 5 files changed, 407 insertions(+), 2 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/progs/xdp_flowtable.c
 create mode 100755 tools/testing/selftests/bpf/test_xdp_flowtable.sh
 create mode 100644 tools/testing/selftests/bpf/xdp_flowtable.c

Comments

Alexei Starovoitov May 21, 2024, 1:43 a.m. UTC | #1
On Sat, May 18, 2024 at 3:13 AM Lorenzo Bianconi <lorenzo@kernel.org> wrote:
>
> Introduce an e2e selftest for the bpf_xdp_flow_offload_lookup kfunc through
> the xdp_flowtable utility.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>  tools/testing/selftests/bpf/Makefile          |  10 +-
>  tools/testing/selftests/bpf/config            |   4 +
>  .../selftests/bpf/progs/xdp_flowtable.c       | 141 +++++++++++++++++
>  .../selftests/bpf/test_xdp_flowtable.sh       | 112 ++++++++++++++
>  tools/testing/selftests/bpf/xdp_flowtable.c   | 142 ++++++++++++++++++
>  5 files changed, 407 insertions(+), 2 deletions(-)
>  create mode 100644 tools/testing/selftests/bpf/progs/xdp_flowtable.c
>  create mode 100755 tools/testing/selftests/bpf/test_xdp_flowtable.sh
>  create mode 100644 tools/testing/selftests/bpf/xdp_flowtable.c
>
> diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
> index e0b3887b3d2df..7361c429bed62 100644
> --- a/tools/testing/selftests/bpf/Makefile
> +++ b/tools/testing/selftests/bpf/Makefile
> @@ -133,7 +133,8 @@ TEST_PROGS := test_kmod.sh \
>         test_bpftool_metadata.sh \
>         test_doc_build.sh \
>         test_xsk.sh \
> -       test_xdp_features.sh
> +       test_xdp_features.sh \
> +       test_xdp_flowtable.sh
>
>  TEST_PROGS_EXTENDED := with_addr.sh \
>         with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
> @@ -144,7 +145,7 @@ TEST_GEN_PROGS_EXTENDED = test_skb_cgroup_id_user \
>         flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
>         test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
>         xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
> -       xdp_features bpf_test_no_cfi.ko
> +       xdp_features bpf_test_no_cfi.ko xdp_flowtable
>
>  TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
>
> @@ -476,6 +477,7 @@ test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
>  xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
>  xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
>  xdp_features.skel.h-deps := xdp_features.bpf.o
> +xdp_flowtable.skel.h-deps := xdp_flowtable.bpf.o
>
>  LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
>
> @@ -710,6 +712,10 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
>         $(call msg,BINARY,,$@)
>         $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
>
> +$(OUTPUT)/xdp_flowtable: xdp_flowtable.c $(OUTPUT)/xdp_flowtable.skel.h | $(OUTPUT)
> +       $(call msg,BINARY,,$@)
> +       $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
> +
>  # Make sure we are able to include and link libbpf against c++.
>  $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
>         $(call msg,CXX,,$@)
> diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
> index eeabd798bc3ae..1a9aea01145f7 100644
> --- a/tools/testing/selftests/bpf/config
> +++ b/tools/testing/selftests/bpf/config
> @@ -82,6 +82,10 @@ CONFIG_NF_CONNTRACK=y
>  CONFIG_NF_CONNTRACK_MARK=y
>  CONFIG_NF_DEFRAG_IPV4=y
>  CONFIG_NF_DEFRAG_IPV6=y
> +CONFIG_NF_TABLES=y
> +CONFIG_NETFILTER_INGRESS=y
> +CONFIG_NF_FLOW_TABLE=y
> +CONFIG_NF_FLOW_TABLE_INET=y
>  CONFIG_NF_NAT=y
>  CONFIG_RC_CORE=y
>  CONFIG_SECURITY=y
> diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
> new file mode 100644
> index 0000000000000..888ac87790f90
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
> @@ -0,0 +1,141 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <vmlinux.h>
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_endian.h>
> +
> +#define MAX_ERRNO      4095
> +
> +#define ETH_P_IP       0x0800
> +#define ETH_P_IPV6     0x86dd
> +#define IP_MF          0x2000  /* "More Fragments" */
> +#define IP_OFFSET      0x1fff  /* "Fragment Offset" */
> +#define AF_INET                2
> +#define AF_INET6       10
> +
> +struct flow_offload_tuple_rhash *
> +bpf_xdp_flow_offload_lookup(struct xdp_md *,
> +                           struct bpf_fib_lookup *) __ksym;
> +
> +struct {
> +       __uint(type, BPF_MAP_TYPE_ARRAY);
> +       __type(key, __u32);
> +       __type(value, __u32);
> +       __uint(max_entries, 1);
> +} stats SEC(".maps");
> +
> +static __always_inline bool
> +xdp_flowtable_offload_check_iphdr(struct iphdr *iph)

Please do not use __always_inline in bpf code.
It was needed 10 years ago. Not any more.
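
For instance, an illustrative rewrite of the first helper above (not part of
the submitted patch) as a plain static function, leaving inlining decisions
to the compiler and the verifier:

static bool
xdp_flowtable_offload_check_iphdr(struct iphdr *iph)
{
	/* only change vs. the patch: no __always_inline */

	/* ip fragmented traffic */
	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
		return false;

	/* ip options */
	if (iph->ihl * 4 != sizeof(*iph))
		return false;

	if (iph->ttl <= 1)
		return false;

	return true;
}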

> +{
> +       /* ip fragmented traffic */
> +       if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
> +               return false;
> +
> +       /* ip options */
> +       if (iph->ihl * 4 != sizeof(*iph))
> +               return false;
> +
> +       if (iph->ttl <= 1)
> +               return false;
> +
> +       return true;
> +}
> +
> +static __always_inline bool
> +xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end, u8 proto)
> +{
> +       if (proto == IPPROTO_TCP) {
> +               struct tcphdr *tcph = ports;
> +
> +               if (tcph + 1 > data_end)
> +                       return false;
> +
> +               if (tcph->fin || tcph->rst)
> +                       return false;
> +       }
> +
> +       return true;
> +}
> +
> +SEC("xdp.frags")
> +int xdp_flowtable_do_lookup(struct xdp_md *ctx)
> +{
> +       void *data_end = (void *)(long)ctx->data_end;
> +       struct flow_offload_tuple_rhash *tuplehash;
> +       struct bpf_fib_lookup tuple = {
> +               .ifindex = ctx->ingress_ifindex,
> +       };
> +       void *data = (void *)(long)ctx->data;
> +       struct ethhdr *eth = data;
> +       struct flow_ports *ports;
> +       __u32 *val, key = 0;
> +
> +       if (eth + 1 > data_end)
> +               return XDP_DROP;
> +
> +       switch (eth->h_proto) {
> +       case bpf_htons(ETH_P_IP): {
> +               struct iphdr *iph = data + sizeof(*eth);
> +
> +               ports = (struct flow_ports *)(iph + 1);
> +               if (ports + 1 > data_end)
> +                       return XDP_PASS;
> +
> +               /* sanity check on ip header */
> +               if (!xdp_flowtable_offload_check_iphdr(iph))
> +                       return XDP_PASS;
> +
> +               if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
> +                                                          iph->protocol))
> +                       return XDP_PASS;
> +
> +               tuple.family            = AF_INET;
> +               tuple.tos               = iph->tos;
> +               tuple.l4_protocol       = iph->protocol;
> +               tuple.tot_len           = bpf_ntohs(iph->tot_len);
> +               tuple.ipv4_src          = iph->saddr;
> +               tuple.ipv4_dst          = iph->daddr;
> +               tuple.sport             = ports->source;
> +               tuple.dport             = ports->dest;
> +               break;
> +       }
> +       case bpf_htons(ETH_P_IPV6): {
> +               struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src;
> +               struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
> +               struct ipv6hdr *ip6h = data + sizeof(*eth);
> +
> +               ports = (struct flow_ports *)(ip6h + 1);
> +               if (ports + 1 > data_end)
> +                       return XDP_PASS;
> +
> +               if (ip6h->hop_limit <= 1)
> +                       return XDP_PASS;
> +
> +               if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
> +                                                          ip6h->nexthdr))
> +                       return XDP_PASS;
> +
> +               tuple.family            = AF_INET6;
> +               tuple.l4_protocol       = ip6h->nexthdr;
> +               tuple.tot_len           = bpf_ntohs(ip6h->payload_len);
> +               *src                    = ip6h->saddr;
> +               *dst                    = ip6h->daddr;
> +               tuple.sport             = ports->source;
> +               tuple.dport             = ports->dest;
> +               break;
> +       }
> +       default:
> +               return XDP_PASS;
> +       }
> +
> +       tuplehash = bpf_xdp_flow_offload_lookup(ctx, &tuple);
> +       if (!tuplehash)
> +               return XDP_PASS;
> +
> +       val = bpf_map_lookup_elem(&stats, &key);
> +       if (val)
> +               __sync_add_and_fetch(val, 1);
> +
> +       return XDP_PASS;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> diff --git a/tools/testing/selftests/bpf/test_xdp_flowtable.sh b/tools/testing/selftests/bpf/test_xdp_flowtable.sh
> new file mode 100755
> index 0000000000000..1a8a40aebbdf1
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/test_xdp_flowtable.sh

Sorry, shell scripts are not allowed.
Integrate it into test_progs.
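
As a rough, illustrative sketch only (not a working replacement for the
patch): a test_progs-based version could live in prog_tests/xdp_flowtable.c
and reuse the existing xdp_flowtable.bpf.o skeleton together with the
netns/SYS() helpers from network_helpers.h. The namespace and veth names
below are hypothetical, and the nft flowtable configuration plus the traffic
generation from test_xdp_flowtable.sh would still need to be ported:

// SPDX-License-Identifier: GPL-2.0
/* prog_tests/xdp_flowtable.c -- illustrative sketch only */
#include <net/if.h>
#include <linux/if_link.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "xdp_flowtable.skel.h"

void test_xdp_flowtable(void)
{
	struct xdp_flowtable *skel = NULL;
	struct nstoken *tok = NULL;
	__u32 count = 0, key = 0;
	int ifindex;

	/* hypothetical namespace/veth names, created just for this test */
	SYS(out, "ip netns add xdp_ft_ns0");
	tok = open_netns("xdp_ft_ns0");
	if (!ASSERT_OK_PTR(tok, "setns"))
		goto out;
	SYS(out, "ip link add v01 type veth peer name v00");
	SYS(out, "ip link set dev v01 up");

	ifindex = if_nametoindex("v01");
	if (!ASSERT_GT(ifindex, 0, "if_nametoindex"))
		goto out;

	skel = xdp_flowtable__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		goto out;

	/* attach the same program the shell script loads via ./xdp_flowtable */
	if (!ASSERT_OK(bpf_xdp_attach(ifindex,
			bpf_program__fd(skel->progs.xdp_flowtable_do_lookup),
			XDP_FLAGS_DRV_MODE, NULL), "xdp_attach"))
		goto out;

	/* the nft flowtable setup and TCP traffic generation from
	 * test_xdp_flowtable.sh would be ported here (SYS() calls for the
	 * nft rules, connect/send helpers for the data transfer, etc.)
	 */

	ASSERT_OK(bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key),
				       &count, sizeof(count), 0), "stats_lookup");
	ASSERT_GT(count, 0, "flowtable_hits");

	bpf_xdp_detach(ifindex, XDP_FLAGS_DRV_MODE, NULL);
out:
	xdp_flowtable__destroy(skel);
	if (tok)
		close_netns(tok);
	SYS_NOFAIL("ip netns del xdp_ft_ns0");
}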

pw-bot: cr

Patch

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e0b3887b3d2df..7361c429bed62 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -133,7 +133,8 @@  TEST_PROGS := test_kmod.sh \
 	test_bpftool_metadata.sh \
 	test_doc_build.sh \
 	test_xsk.sh \
-	test_xdp_features.sh
+	test_xdp_features.sh \
+	test_xdp_flowtable.sh
 
 TEST_PROGS_EXTENDED := with_addr.sh \
 	with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
@@ -144,7 +145,7 @@  TEST_GEN_PROGS_EXTENDED = test_skb_cgroup_id_user \
 	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
 	test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
 	xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
-	xdp_features bpf_test_no_cfi.ko
+	xdp_features bpf_test_no_cfi.ko xdp_flowtable
 
 TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
 
@@ -476,6 +477,7 @@  test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
 xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o
 xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o
 xdp_features.skel.h-deps := xdp_features.bpf.o
+xdp_flowtable.skel.h-deps := xdp_flowtable.bpf.o
 
 LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
 
@@ -710,6 +712,10 @@  $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
 	$(call msg,BINARY,,$@)
 	$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
 
+$(OUTPUT)/xdp_flowtable: xdp_flowtable.c $(OUTPUT)/xdp_flowtable.skel.h | $(OUTPUT)
+	$(call msg,BINARY,,$@)
+	$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+
 # Make sure we are able to include and link libbpf against c++.
 $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
 	$(call msg,CXX,,$@)
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index eeabd798bc3ae..1a9aea01145f7 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -82,6 +82,10 @@  CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_DEFRAG_IPV4=y
 CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_TABLES=y
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NF_FLOW_TABLE=y
+CONFIG_NF_FLOW_TABLE_INET=y
 CONFIG_NF_NAT=y
 CONFIG_RC_CORE=y
 CONFIG_SECURITY=y
diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
new file mode 100644
index 0000000000000..888ac87790f90
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c
@@ -0,0 +1,141 @@ 
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define MAX_ERRNO	4095
+
+#define ETH_P_IP	0x0800
+#define ETH_P_IPV6	0x86dd
+#define IP_MF		0x2000	/* "More Fragments" */
+#define IP_OFFSET	0x1fff	/* "Fragment Offset" */
+#define AF_INET		2
+#define AF_INET6	10
+
+struct flow_offload_tuple_rhash *
+bpf_xdp_flow_offload_lookup(struct xdp_md *,
+			    struct bpf_fib_lookup *) __ksym;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, __u32);
+	__type(value, __u32);
+	__uint(max_entries, 1);
+} stats SEC(".maps");
+
+static __always_inline bool
+xdp_flowtable_offload_check_iphdr(struct iphdr *iph)
+{
+	/* ip fragmented traffic */
+	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET))
+		return false;
+
+	/* ip options */
+	if (iph->ihl * 4 != sizeof(*iph))
+		return false;
+
+	if (iph->ttl <= 1)
+		return false;
+
+	return true;
+}
+
+static __always_inline bool
+xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end, u8 proto)
+{
+	if (proto == IPPROTO_TCP) {
+		struct tcphdr *tcph = ports;
+
+		if (tcph + 1 > data_end)
+			return false;
+
+		if (tcph->fin || tcph->rst)
+			return false;
+	}
+
+	return true;
+}
+
+SEC("xdp.frags")
+int xdp_flowtable_do_lookup(struct xdp_md *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct bpf_fib_lookup tuple = {
+		.ifindex = ctx->ingress_ifindex,
+	};
+	void *data = (void *)(long)ctx->data;
+	struct ethhdr *eth = data;
+	struct flow_ports *ports;
+	__u32 *val, key = 0;
+
+	if (eth + 1 > data_end)
+		return XDP_DROP;
+
+	switch (eth->h_proto) {
+	case bpf_htons(ETH_P_IP): {
+		struct iphdr *iph = data + sizeof(*eth);
+
+		ports = (struct flow_ports *)(iph + 1);
+		if (ports + 1 > data_end)
+			return XDP_PASS;
+
+		/* sanity check on ip header */
+		if (!xdp_flowtable_offload_check_iphdr(iph))
+			return XDP_PASS;
+
+		if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+							   iph->protocol))
+			return XDP_PASS;
+
+		tuple.family		= AF_INET;
+		tuple.tos		= iph->tos;
+		tuple.l4_protocol	= iph->protocol;
+		tuple.tot_len		= bpf_ntohs(iph->tot_len);
+		tuple.ipv4_src		= iph->saddr;
+		tuple.ipv4_dst		= iph->daddr;
+		tuple.sport		= ports->source;
+		tuple.dport		= ports->dest;
+		break;
+	}
+	case bpf_htons(ETH_P_IPV6): {
+		struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src;
+		struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
+		struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+		ports = (struct flow_ports *)(ip6h + 1);
+		if (ports + 1 > data_end)
+			return XDP_PASS;
+
+		if (ip6h->hop_limit <= 1)
+			return XDP_PASS;
+
+		if (!xdp_flowtable_offload_check_tcp_state(ports, data_end,
+							   ip6h->nexthdr))
+			return XDP_PASS;
+
+		tuple.family		= AF_INET6;
+		tuple.l4_protocol	= ip6h->nexthdr;
+		tuple.tot_len		= bpf_ntohs(ip6h->payload_len);
+		*src			= ip6h->saddr;
+		*dst			= ip6h->daddr;
+		tuple.sport		= ports->source;
+		tuple.dport		= ports->dest;
+		break;
+	}
+	default:
+		return XDP_PASS;
+	}
+
+	tuplehash = bpf_xdp_flow_offload_lookup(ctx, &tuple);
+	if (!tuplehash)
+		return XDP_PASS;
+
+	val = bpf_map_lookup_elem(&stats, &key);
+	if (val)
+		__sync_add_and_fetch(val, 1);
+
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_xdp_flowtable.sh b/tools/testing/selftests/bpf/test_xdp_flowtable.sh
new file mode 100755
index 0000000000000..1a8a40aebbdf1
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_flowtable.sh
@@ -0,0 +1,112 @@ 
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly NS0="ns0-$(mktemp -u XXXXXX)"
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly infile="$(mktemp)"
+readonly outfile="$(mktemp)"
+
+xdp_flowtable_pid=""
+ret=1
+
+setup_flowtable() {
+nft -f /dev/stdin <<EOF
+table inet nat {
+	chain postrouting {
+		type nat hook postrouting priority filter; policy accept;
+		meta oif v10 masquerade
+	}
+}
+table inet filter {
+	flowtable ft {
+		hook ingress priority filter
+		devices = { v01, v10 }
+	}
+	chain forward {
+		type filter hook forward priority filter
+		meta l4proto { tcp, udp } flow add @ft
+	}
+}
+EOF
+}
+
+setup() {
+	sysctl -w net.ipv4.ip_forward=1
+	sysctl -w net.ipv6.conf.all.forwarding=1
+
+	ip netns add ${NS0}
+	ip netns add ${NS1}
+
+	ip link add v01 type veth peer name v00 netns ${NS0}
+	ip link add v10 type veth peer name v11 netns ${NS1}
+
+	ip -n ${NS0} addr add 192.168.0.1/24 dev v00
+	ip -6 -n ${NS0} addr add 2001:db8::1/64 dev v00
+	ip -n ${NS0} link set dev v00 up
+	ip -n ${NS0} route add default via 192.168.0.2
+	ip -6 -n ${NS0} route add default via 2001:db8::2
+
+	ip addr add 192.168.0.2/24 dev v01
+	ip -6 addr add 2001:db8::2/64 dev v01
+	ip link set dev v01 up
+	ip addr add 192.168.1.1/24 dev v10
+	ip -6 addr add 2001:db8:1::1/64 dev v10
+	ip link set dev v10 up
+
+	ip -n ${NS1} addr add 192.168.1.2/24 dev v11
+	ip -6 -n ${NS1} addr add 2001:db8:1::2/64 dev v11
+	ip -n ${NS1} link set dev v11 up
+	ip -n ${NS1} route add default via 192.168.1.1
+	ip -6 -n ${NS1} route add default via 2001:db8:1::1
+
+	# Load XDP program
+	./xdp_flowtable v01 &
+	xdp_flowtable_pid=$!
+
+	setup_flowtable
+
+	dd if=/dev/urandom of="${infile}" bs=8192 count=16 status=none
+}
+
+wait_for_nc_server() {
+	while sleep 1; do
+		ip netns exec ${NS1} ss -nutlp | grep -q ":$1"
+		[ $? -eq 0 ] && break
+	done
+}
+
+cleanup() {
+	{
+		rm -f "${infile}" "${outfile}"
+
+		nft delete table inet filter
+		nft delete table inet nat
+
+		ip link del v01
+		ip link del v10
+
+		ip netns del ${NS0}
+		ip netns del ${NS1}
+	} >/dev/null 2>/dev/null
+}
+
+test_xdp_flowtable_lookup() {
+	## Run IPv4 test
+	ip netns exec ${NS1} nc -4 --no-shutdown -l 8084 > ${outfile} &
+	wait_for_nc_server 8084
+	ip netns exec ${NS0} timeout 2 nc -4 192.168.1.2 8084 < ${infile}
+
+	## Run IPv6 test
+	ip netns exec ${NS1} nc -6 --no-shutdown -l 8086 > ${outfile} &
+	wait_for_nc_server 8086
+	ip netns exec ${NS0} timeout 2 nc -6 2001:db8:1::2 8086 < ${infile}
+
+	wait $xdp_flowtable_pid && ret=0
+}
+
+trap cleanup 0 2 3 6 9
+setup
+
+test_xdp_flowtable_lookup
+
+exit $ret
diff --git a/tools/testing/selftests/bpf/xdp_flowtable.c b/tools/testing/selftests/bpf/xdp_flowtable.c
new file mode 100644
index 0000000000000..dea24deda7359
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_flowtable.c
@@ -0,0 +1,142 @@ 
+// SPDX-License-Identifier: GPL-2.0
+#include <uapi/linux/bpf.h>
+#include <linux/if_link.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <signal.h>
+#include <argp.h>
+
+#include "xdp_flowtable.skel.h"
+
+#define MAX_ITERATION	10
+
+static volatile bool exiting, verbosity;
+static char ifname[IF_NAMESIZE];
+static int ifindex = -ENODEV;
+const char *argp_program_version = "xdp-flowtable 0.0";
+const char argp_program_doc[] =
+"XDP flowtable application.\n"
+"\n"
+"USAGE: ./xdp-flowtable [-v] <iface-name>\n";
+
+static const struct argp_option opts[] = {
+	{ "verbose", 'v', NULL, 0, "Verbose debug output" },
+	{},
+};
+
+static void sig_handler(int sig)
+{
+	exiting = true;
+}
+
+static int libbpf_print_fn(enum libbpf_print_level level,
+			   const char *format, va_list args)
+{
+	if (level == LIBBPF_DEBUG && !verbosity)
+		return 0;
+	return vfprintf(stderr, format, args);
+}
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+	switch (key) {
+	case 'v':
+		verbosity = true;
+		break;
+	case ARGP_KEY_ARG:
+		errno = 0;
+		if (strlen(arg) >= IF_NAMESIZE) {
+			fprintf(stderr, "Invalid device name: %s\n", arg);
+			argp_usage(state);
+			return ARGP_ERR_UNKNOWN;
+		}
+
+		ifindex = if_nametoindex(arg);
+		if (!ifindex)
+			ifindex = strtoul(arg, NULL, 0);
+		if (!ifindex || !if_indextoname(ifindex, ifname)) {
+			fprintf(stderr,
+				"Bad interface index or name (%d): %s\n",
+				errno, strerror(errno));
+			argp_usage(state);
+			return ARGP_ERR_UNKNOWN;
+		}
+		break;
+	default:
+		return ARGP_ERR_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static const struct argp argp = {
+	.options = opts,
+	.parser = parse_arg,
+	.doc = argp_program_doc,
+};
+
+int main(int argc, char **argv)
+{
+	unsigned int count = 0, key = 0;
+	struct xdp_flowtable *skel;
+	int i, err;
+
+	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+	libbpf_set_print(libbpf_print_fn);
+
+	signal(SIGINT, sig_handler);
+	signal(SIGTERM, sig_handler);
+
+	/* Parse command line arguments */
+	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+	if (err)
+		return err;
+
+	/* Load and verify BPF application */
+	skel = xdp_flowtable__open();
+	if (!skel) {
+		fprintf(stderr, "Failed to open and load BPF skeleton\n");
+		return -EINVAL;
+	}
+
+	/* Load & verify BPF programs */
+	err = xdp_flowtable__load(skel);
+	if (err) {
+		fprintf(stderr, "Failed to load and verify BPF skeleton\n");
+		goto cleanup;
+	}
+
+	/* Attach the XDP program */
+	err = xdp_flowtable__attach(skel);
+	if (err) {
+		fprintf(stderr, "Failed to attach BPF skeleton\n");
+		goto cleanup;
+	}
+
+	err = bpf_xdp_attach(ifindex,
+			     bpf_program__fd(skel->progs.xdp_flowtable_do_lookup),
+			     XDP_FLAGS_DRV_MODE, NULL);
+	if (err) {
+		fprintf(stderr, "Failed attaching XDP program to device %s\n",
+			ifname);
+		goto cleanup;
+	}
+
+	/* Collect stats */
+	for (i = 0; i < MAX_ITERATION && !exiting; i++)
+		sleep(1);
+
+	/* Check results */
+	err = bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key),
+				   &count, sizeof(count), 0);
+	if (!err && !count)
+		err = -EINVAL;
+
+	bpf_xdp_detach(ifindex, XDP_FLAGS_DRV_MODE, NULL);
+cleanup:
+	xdp_flowtable__destroy(skel);
+
+	return err;
+}