From patchwork Thu Sep 9 22:04:08 2021
X-Patchwork-Submitter: Vadim Fedorenko
X-Patchwork-Id: 12484035
X-Patchwork-Delegate: bpf@iogearbox.net
X-Mailing-List: bpf@vger.kernel.org
From: Vadim Fedorenko
To: Martin KaFai Lau, Andrii Nakryiko, Daniel Borkmann
Cc: Vadim Fedorenko, Alexei Starovoitov, Song Liu, Yonghong Song,
    John Fastabend, Jakub Kicinski, netdev@vger.kernel.org,
    bpf@vger.kernel.org
Subject: [PATCH bpf-next v3 1/2] bpf: add hardware timestamp field to __sk_buff
Date: Fri, 10 Sep 2021 01:04:08 +0300
Message-Id: <20210909220409.8804-2-vfedorenko@novek.ru>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20210909220409.8804-1-vfedorenko@novek.ru>
References: <20210909220409.8804-1-vfedorenko@novek.ru>

BPF programs may want to know hardware timestamps if the NIC supports
such timestamping. Expose this data as the hwtstamp field of __sk_buff,
the same way as gso_segs/gso_size. The field can be accessed from the
same program types as the tstamp field, but it is read-only. An explicit
check is added to bpf_skb_is_valid_access to deny access to the padding
in front of hwtstamp. BPF_PROG_TEST_RUN tests of the feature are also
updated.

Signed-off-by: Vadim Fedorenko
Acked-by: Martin KaFai Lau
---
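[Not part of the patch: a minimal sketch of how a program could consume
the new field once this lands. The program name, section name and the
bpf_printk output are illustrative assumptions only.]

/* Sketch: a TC (SCHED_CLS) program reading the new read-only field.
 * Use SEC("classifier") instead of SEC("tc") on older libbpf versions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int read_hwtstamp(struct __sk_buff *skb)
{
	/* Only full 8-byte reads are allowed; 0 means the NIC did not
	 * timestamp this packet.
	 */
	__u64 hwts = skb->hwtstamp;

	if (hwts)
		bpf_printk("hw timestamp: %llu", hwts);

	return 0; /* TC_ACT_OK */
}

char _license[] SEC("license") = "GPL";

Writes to skb->hwtstamp and narrow (non-8-byte) reads are rejected by
the verifier; the selftests in patch 2/2 exercise both cases.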
 include/uapi/linux/bpf.h       |  2 ++
 net/core/filter.c              | 21 +++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  2 ++
 3 files changed, 25 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 791f31dd0abe..51cfd91cc387 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5284,6 +5284,8 @@ struct __sk_buff {
 	__u32 gso_segs;
 	__bpf_md_ptr(struct bpf_sock *, sk);
 	__u32 gso_size;
+	__u32 :32;		/* Padding, future use. */
+	__u64 hwtstamp;
 };
 
 struct bpf_tunnel_key {
diff --git a/net/core/filter.c b/net/core/filter.c
index 2e32cee2c469..4bace37a6a44 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7765,6 +7765,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 		break;
 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 		return false;
+	case bpf_ctx_range(struct __sk_buff, hwtstamp):
+		if (type == BPF_WRITE || size != sizeof(__u64))
+			return false;
+		break;
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 		if (size != sizeof(__u64))
 			return false;
@@ -7774,6 +7778,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 			return false;
 		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
 		break;
+	case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1:
+		/* Explicitly prohibit access to padding in __sk_buff. */
+		return false;
 	default:
 		/* Only narrow read access allowed for now. */
 		if (type == BPF_WRITE) {
@@ -7802,6 +7809,7 @@ static bool sk_filter_is_valid_access(int off, int size,
 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
+	case bpf_ctx_range(struct __sk_buff, hwtstamp):
 		return false;
 	}
 
@@ -7872,6 +7880,7 @@ static bool lwt_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
+	case bpf_ctx_range(struct __sk_buff, hwtstamp):
 		return false;
 	}
 
@@ -8373,6 +8382,7 @@ static bool sk_skb_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, tstamp):
 	case bpf_ctx_range(struct __sk_buff, wire_len):
+	case bpf_ctx_range(struct __sk_buff, hwtstamp):
 		return false;
 	}
 
@@ -8884,6 +8894,17 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 				      si->dst_reg, si->src_reg,
 				      offsetof(struct sk_buff, sk));
 		break;
+	case offsetof(struct __sk_buff, hwtstamp):
+		BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8);
+		BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0);
+
+		insn = bpf_convert_shinfo_access(si, insn);
+		*insn++ = BPF_LDX_MEM(BPF_DW,
+				      si->dst_reg, si->dst_reg,
+				      bpf_target_off(struct skb_shared_info,
+						     hwtstamps, 8,
+						     target_size));
+		break;
 	}
 
 	return insn - insn_buf;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 791f31dd0abe..51cfd91cc387 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5284,6 +5284,8 @@ struct __sk_buff {
 	__u32 gso_segs;
 	__bpf_md_ptr(struct bpf_sock *, sk);
 	__u32 gso_size;
+	__u32 :32;		/* Padding, future use. */
+	__u64 hwtstamp;
 };
 
 struct bpf_tunnel_key {
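[Not part of the patch: the anonymous ":32" bit-field above exists only
to keep hwtstamp naturally aligned at an 8-byte boundary, which is what
lets the verifier insist on full __u64 accesses. A standalone sketch
checking that, assuming a uapi linux/bpf.h that already carries this
patch:]

#include <linux/bpf.h>
#include <stddef.h>

/* hwtstamp sits right after the 32-bit padding, so its offset must be
 * a multiple of 8.
 */
_Static_assert(offsetof(struct __sk_buff, hwtstamp) % 8 == 0,
	       "hwtstamp must be 8-byte aligned");

int main(void)
{
	return 0;
}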
From patchwork Thu Sep 9 22:04:09 2021
X-Patchwork-Submitter: Vadim Fedorenko
X-Patchwork-Id: 12484037
X-Patchwork-Delegate: bpf@iogearbox.net
X-Mailing-List: bpf@vger.kernel.org
From: Vadim Fedorenko
To: Martin KaFai Lau, Andrii Nakryiko, Daniel Borkmann
Cc: Vadim Fedorenko, Alexei Starovoitov, Song Liu, Yonghong Song,
    John Fastabend, Jakub Kicinski, netdev@vger.kernel.org,
    bpf@vger.kernel.org
Subject: [PATCH bpf-next v3 2/2] selftests/bpf: test new __sk_buff field hwtstamp
Date: Fri, 10 Sep 2021 01:04:09 +0300
Message-Id: <20210909220409.8804-3-vfedorenko@novek.ru>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20210909220409.8804-1-vfedorenko@novek.ru>
References: <20210909220409.8804-1-vfedorenko@novek.ru>

Add tests for the new hwtstamp field of __sk_buff, analogous to the
gso_segs selftests introduced in commit d9ff286a0f59 ("bpf: allow BPF
programs access skb_shared_info->gso_segs field").

Signed-off-by: Vadim Fedorenko
Acked-by: Martin KaFai Lau
---
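[Not part of the patch: a user-space sketch of feeding hwtstamp through
BPF_PROG_TEST_RUN using the newer bpf_prog_test_run_opts() API. libbpf
>= 0.7, prog_fd and the packet buffer are assumptions here; the selftest
below uses the older bpf_prog_test_run_attr interface instead.]

#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int run_with_hwtstamp(int prog_fd, void *pkt, size_t pkt_len)
{
	struct __sk_buff ctx = {
		.hwtstamp = 11,	/* matches the check in test_skb_ctx.c */
	};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	);

	/* Returns 0 on success; the program's return value is in
	 * topts.retval.
	 */
	return bpf_prog_test_run_opts(prog_fd, &topts);
}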
 lib/test_bpf.c                                |  1 +
 net/bpf/test_run.c                            |  8 +++
 .../selftests/bpf/prog_tests/skb_ctx.c        |  1 +
 .../selftests/bpf/progs/test_skb_ctx.c        |  2 +
 .../testing/selftests/bpf/verifier/ctx_skb.c  | 60 +++++++++++++++++++
 5 files changed, 72 insertions(+)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 830a18ecffc8..0018d51b93b0 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -8800,6 +8800,7 @@ static __init struct sk_buff *build_test_skb(void)
 	skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
 	skb_shinfo(skb[0])->gso_segs = 0;
 	skb_shinfo(skb[0])->frag_list = skb[1];
+	skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000;
 
 	/* adjust skb[0]'s len */
 	skb[0]->len += skb[1]->len;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 1153b89c9d93..fcb2f493f710 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -507,6 +507,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	/* gso_size is allowed */
 
 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
+			   offsetof(struct __sk_buff, hwtstamp)))
+		return -EINVAL;
+
+	/* hwtstamp is allowed */
+
+	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
 			   sizeof(struct __sk_buff)))
 		return -EINVAL;
 
@@ -529,6 +535,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 		return -EINVAL;
 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
 	skb_shinfo(skb)->gso_size = __skb->gso_size;
+	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
 
 	return 0;
 }
@@ -548,6 +555,7 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
 	__skb->wire_len = cb->pkt_len;
 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
+	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
 }
 
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index 2bf8c687348b..c437e6ba8fe2 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -18,6 +18,7 @@ void test_skb_ctx(void)
 		.gso_segs = 8,
 		.mark = 9,
 		.gso_size = 10,
+		.hwtstamp = 11,
 	};
 	struct bpf_prog_test_run_attr tattr = {
 		.data_in = &pkt_v4,
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index bbd5a9c1c4df..ba4dab09d19c 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -29,6 +29,8 @@ int process(struct __sk_buff *skb)
 		return 1;
 	if (skb->ifindex != 1)
 		return 1;
+	if (skb->hwtstamp != 11)
+		return 1;
 
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 2022c0f2cd75..9e1a30b94197 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1057,6 +1057,66 @@
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+	"padding after gso_size is not accessible",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetofend(struct __sk_buff, gso_size)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.result_unpriv = REJECT,
+	.errstr = "invalid bpf_context access off=180 size=4",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+	"read hwtstamp from CGROUP_SKB",
+	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, hwtstamp)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"read hwtstamp from CGROUP_SKB",
+	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+		    offsetof(struct __sk_buff, hwtstamp)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"write hwtstamp from CGROUP_SKB",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+		    offsetof(struct __sk_buff, hwtstamp)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.result_unpriv = REJECT,
+	.errstr = "invalid bpf_context access off=184 size=8",
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+	"read hwtstamp from CLS",
+	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, hwtstamp)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
 	"check wire_len is not readable by sockets",
 	.insns = {