Message ID | 20201117094023.3685-7-kuniyu@amazon.co.jp (mailing list archive) |
---|---|
State | RFC |
Delegated to: | BPF |
Headers | show |
Series | Socket migration for SO_REUSEPORT. | expand |
Context | Check | Description |
---|---|---|
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Clearly marked for bpf-next |
netdev/subject_prefix | success | Link |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | success | Errors and warnings before: 15752 this patch: 15752 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 47 lines checked |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 15664 this patch: 15664 |
netdev/header_inline | success | Link |
netdev/stable | success | Stable not CCed |
On Tue, Nov 17, 2020 at 06:40:21PM +0900, Kuniyuki Iwashima wrote: > We will call sock_reuseport.prog for socket migration in the next commit, > so the eBPF program has to know which listener is closing in order to > select the new listener. > > Currently, we can get a unique ID for each listener in the userspace by > calling bpf_map_lookup_elem() for BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map. > This patch exposes the ID to the eBPF program. > > Reviewed-by: Benjamin Herrenschmidt <benh@amazon.com> > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp> > --- > include/linux/bpf.h | 1 + > include/uapi/linux/bpf.h | 1 + > net/core/filter.c | 8 ++++++++ > tools/include/uapi/linux/bpf.h | 1 + > 4 files changed, 11 insertions(+) > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h > index 581b2a2e78eb..c0646eceffa2 100644 > --- a/include/linux/bpf.h > +++ b/include/linux/bpf.h > @@ -1897,6 +1897,7 @@ struct sk_reuseport_kern { > u32 hash; > u32 reuseport_id; > bool bind_inany; > + u64 cookie; > }; > bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, > struct bpf_insn_access_aux *info); > diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h > index 162999b12790..3fcddb032838 100644 > --- a/include/uapi/linux/bpf.h > +++ b/include/uapi/linux/bpf.h > @@ -4403,6 +4403,7 @@ struct sk_reuseport_md { > __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ > __u32 bind_inany; /* Is sock bound to an INANY address? */ > __u32 hash; /* A hash of the packet 4 tuples */ > + __u64 cookie; /* ID of the listener in map */ Instead of only adding the cookie of a sk, lets make the sk pointer available: __bpf_md_ptr(struct bpf_sock *, sk); and then use the BPF_FUNC_get_socket_cookie to get the cookie. Other fields of the sk can also be directly accessed too once the sk pointer is available.
From: Martin KaFai Lau <kafai@fb.com> Date: Wed, 18 Nov 2020 16:11:54 -0800 > On Tue, Nov 17, 2020 at 06:40:21PM +0900, Kuniyuki Iwashima wrote: > > We will call sock_reuseport.prog for socket migration in the next commit, > > so the eBPF program has to know which listener is closing in order to > > select the new listener. > > > > Currently, we can get a unique ID for each listener in the userspace by > > calling bpf_map_lookup_elem() for BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map. > > This patch exposes the ID to the eBPF program. > > > > Reviewed-by: Benjamin Herrenschmidt <benh@amazon.com> > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp> > > --- > > include/linux/bpf.h | 1 + > > include/uapi/linux/bpf.h | 1 + > > net/core/filter.c | 8 ++++++++ > > tools/include/uapi/linux/bpf.h | 1 + > > 4 files changed, 11 insertions(+) > > > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h > > index 581b2a2e78eb..c0646eceffa2 100644 > > --- a/include/linux/bpf.h > > +++ b/include/linux/bpf.h > > @@ -1897,6 +1897,7 @@ struct sk_reuseport_kern { > > u32 hash; > > u32 reuseport_id; > > bool bind_inany; > > + u64 cookie; > > }; > > bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, > > struct bpf_insn_access_aux *info); > > diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h > > index 162999b12790..3fcddb032838 100644 > > --- a/include/uapi/linux/bpf.h > > +++ b/include/uapi/linux/bpf.h > > @@ -4403,6 +4403,7 @@ struct sk_reuseport_md { > > __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ > > __u32 bind_inany; /* Is sock bound to an INANY address? */ > > __u32 hash; /* A hash of the packet 4 tuples */ > > + __u64 cookie; /* ID of the listener in map */ > Instead of only adding the cookie of a sk, lets make the sk pointer available: > > __bpf_md_ptr(struct bpf_sock *, sk); > > and then use the BPF_FUNC_get_socket_cookie to get the cookie. 
> > Other fields of the sk can also be directly accessed too once > the sk pointer is available. Oh, I did not know BPF_FUNC_get_socket_cookie. I will add the sk pointer and use the helper function in the next spin! Thank you.
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 581b2a2e78eb..c0646eceffa2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1897,6 +1897,7 @@ struct sk_reuseport_kern { u32 hash; u32 reuseport_id; bool bind_inany; + u64 cookie; }; bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 162999b12790..3fcddb032838 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4403,6 +4403,7 @@ struct sk_reuseport_md { __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ + __u64 cookie; /* ID of the listener in map */ }; #define BPF_TAG_SIZE 8 diff --git a/net/core/filter.c b/net/core/filter.c index 2ca5eecebacf..01e28f283962 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9862,6 +9862,7 @@ static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, reuse_kern->hash = hash; reuse_kern->reuseport_id = reuse->reuseport_id; reuse_kern->bind_inany = reuse->bind_inany; + reuse_kern->cookie = sock_gen_cookie(sk); } struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, @@ -10010,6 +10011,9 @@ sk_reuseport_is_valid_access(int off, int size, case offsetof(struct sk_reuseport_md, hash): return size == size_default; + case bpf_ctx_range(struct sk_reuseport_md, cookie): + return size == sizeof(__u64); + /* Fields that allow narrowing */ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < sizeof_field(struct sk_buff, protocol)) @@ -10082,6 +10086,10 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, case offsetof(struct sk_reuseport_md, bind_inany): SK_REUSEPORT_LOAD_FIELD(bind_inany); break; + + case offsetof(struct sk_reuseport_md, cookie): + SK_REUSEPORT_LOAD_FIELD(cookie); + break; } return insn - insn_buf; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 162999b12790..3fcddb032838 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4403,6 +4403,7 @@ struct sk_reuseport_md { __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ __u32 bind_inany; /* Is sock bound to an INANY address? */ __u32 hash; /* A hash of the packet 4 tuples */ + __u64 cookie; /* ID of the listener in map */ }; #define BPF_TAG_SIZE 8