diff mbox series

[bpf-next,2/3] bpf: allow bpf_{s,g}etsockopt from cgroup bind{4,6} hooks

Message ID 20201118001742.85005-3-sdf@google.com (mailing list archive)
State New, archived
Delegated to: BPF
Headers show
Series bpf: expose bpf_{s,g}etsockopt helpers to bind{4,6} hooks | expand

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success Link
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 15689 this patch: 15689
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 57 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 15601 this patch: 15601
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Stanislav Fomichev Nov. 18, 2020, 12:17 a.m. UTC
I now have to lock/unlock the socket for the bind hook execution.
That shouldn't cause any overhead because the socket is unbound
and shouldn't receive any traffic.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/bpf-cgroup.h | 12 ++++++------
 net/core/filter.c          |  4 ++++
 net/ipv4/af_inet.c         |  2 +-
 net/ipv6/af_inet6.c        |  2 +-
 4 files changed, 12 insertions(+), 8 deletions(-)
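
For context, below is a minimal sketch of the kind of program this change
enables: a cgroup/bind4 program calling bpf_setsockopt() from the bind path.
The program name and the chosen socket option are illustrative only, not
taken from this series.

#include <linux/bpf.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/bind4")
int set_keepalive_on_bind(struct bpf_sock_addr *ctx)
{
	int one = 1;

	/* Calling bpf_setsockopt() from a bind hook is what this patch
	 * newly allows; the option itself is just an example.
	 */
	if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)))
		return 0;	/* reject the bind() if the helper fails */

	return 1;		/* let the bind() proceed */
}

char _license[] SEC("license") = "GPL";

A program like this would typically be attached with
bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET4_BIND, 0).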

Comments

Alexei Starovoitov Nov. 18, 2020, 4:05 a.m. UTC | #1
On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com> wrote:
>
> I have to now lock/unlock socket for the bind hook execution.
> That shouldn't cause any overhead because the socket is unbound
> and shouldn't receive any traffic.
>
> Signed-off-by: Stanislav Fomichev <sdf@google.com>
> ---
>  include/linux/bpf-cgroup.h | 12 ++++++------
>  net/core/filter.c          |  4 ++++
>  net/ipv4/af_inet.c         |  2 +-
>  net/ipv6/af_inet6.c        |  2 +-
>  4 files changed, 12 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> index ed71bd1a0825..72e69a0e1e8c 100644
> --- a/include/linux/bpf-cgroup.h
> +++ b/include/linux/bpf-cgroup.h
> @@ -246,11 +246,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
>         __ret;                                                                 \
>  })
>
> -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)                             \
> -       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
> +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr)                        \
> +       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
>
> -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)                             \
> -       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
> +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr)                        \
> +       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
>
>  #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
>                                             sk->sk_prot->pre_connect)
> @@ -434,8 +434,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
>  #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
> -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
> -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
> +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
> +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 2ca5eecebacf..21d91dcf0260 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -6995,6 +6995,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                 return &bpf_sk_storage_delete_proto;
>         case BPF_FUNC_setsockopt:
>                 switch (prog->expected_attach_type) {
> +               case BPF_CGROUP_INET4_BIND:
> +               case BPF_CGROUP_INET6_BIND:
>                 case BPF_CGROUP_INET4_CONNECT:
>                 case BPF_CGROUP_INET6_CONNECT:
>                         return &bpf_sock_addr_setsockopt_proto;
> @@ -7003,6 +7005,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                 }
>         case BPF_FUNC_getsockopt:
>                 switch (prog->expected_attach_type) {
> +               case BPF_CGROUP_INET4_BIND:
> +               case BPF_CGROUP_INET6_BIND:
>                 case BPF_CGROUP_INET4_CONNECT:
>                 case BPF_CGROUP_INET6_CONNECT:
>                         return &bpf_sock_addr_getsockopt_proto;
> diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> index b7260c8cef2e..b94fa8eb831b 100644
> --- a/net/ipv4/af_inet.c
> +++ b/net/ipv4/af_inet.c
> @@ -450,7 +450,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
>         /* BPF prog is run before any checks are done so that if the prog
>          * changes context in a wrong way it will be caught.
>          */
> -       err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
> +       err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);

I think it is ok, but I need to go through the locking paths more.
Andrey,
please take a look as well.

>         if (err)
>                 return err;
>
> diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
> index e648fbebb167..a7e3d170af51 100644
> --- a/net/ipv6/af_inet6.c
> +++ b/net/ipv6/af_inet6.c
> @@ -451,7 +451,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
>         /* BPF prog is run before any checks are done so that if the prog
>          * changes context in a wrong way it will be caught.
>          */
> -       err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
> +       err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
>         if (err)
>                 return err;
>
> --
> 2.29.2.299.gdc1121823c-goog
>
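The locking in question comes from BPF_CGROUP_RUN_SA_PROG_LOCK, which is not
part of this diff (it is the locked run-helper already used by the connect
hooks). A rough sketch of what it is assumed to expand to, not the verbatim
kernel macro:

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		/* run the prog with the socket owned by this task */	       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							   t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

So BPF_CGROUP_RUN_PROG_INET{4,6}_BIND_LOCK() takes the socket lock once
before inet{,6}_bind() takes it again for the actual bind work, which is the
extra lock/unlock discussed below.
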
Andrey Ignatov Nov. 30, 2020, 1:05 a.m. UTC | #2
Alexei Starovoitov <alexei.starovoitov@gmail.com> [Tue, 2020-11-17 20:05 -0800]:
> On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com> wrote:
> >
> > I have to now lock/unlock socket for the bind hook execution.
> > That shouldn't cause any overhead because the socket is unbound
> > and shouldn't receive any traffic.
> >
> > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > ---
> >  include/linux/bpf-cgroup.h | 12 ++++++------
> >  net/core/filter.c          |  4 ++++
> >  net/ipv4/af_inet.c         |  2 +-
> >  net/ipv6/af_inet6.c        |  2 +-
> >  4 files changed, 12 insertions(+), 8 deletions(-)
> >
> > diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> > index ed71bd1a0825..72e69a0e1e8c 100644
> > --- a/include/linux/bpf-cgroup.h
> > +++ b/include/linux/bpf-cgroup.h
> > @@ -246,11 +246,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
> >         __ret;                                                                 \
> >  })
> >
> > -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)                             \
> > -       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
> > +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr)                        \
> > +       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
> >
> > -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)                             \
> > -       BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
> > +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr)                        \
> > +       BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
> >
> >  #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
> >                                             sk->sk_prot->pre_connect)
> > @@ -434,8 +434,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
> >  #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
> >  #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
> >  #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
> > -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
> > -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
> > +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
> > +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
> >  #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
> >  #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
> >  #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index 2ca5eecebacf..21d91dcf0260 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -6995,6 +6995,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> >                 return &bpf_sk_storage_delete_proto;
> >         case BPF_FUNC_setsockopt:
> >                 switch (prog->expected_attach_type) {
> > +               case BPF_CGROUP_INET4_BIND:
> > +               case BPF_CGROUP_INET6_BIND:
> >                 case BPF_CGROUP_INET4_CONNECT:
> >                 case BPF_CGROUP_INET6_CONNECT:
> >                         return &bpf_sock_addr_setsockopt_proto;
> > @@ -7003,6 +7005,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> >                 }
> >         case BPF_FUNC_getsockopt:
> >                 switch (prog->expected_attach_type) {
> > +               case BPF_CGROUP_INET4_BIND:
> > +               case BPF_CGROUP_INET6_BIND:
> >                 case BPF_CGROUP_INET4_CONNECT:
> >                 case BPF_CGROUP_INET6_CONNECT:
> >                         return &bpf_sock_addr_getsockopt_proto;
> > diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> > index b7260c8cef2e..b94fa8eb831b 100644
> > --- a/net/ipv4/af_inet.c
> > +++ b/net/ipv4/af_inet.c
> > @@ -450,7 +450,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
> >         /* BPF prog is run before any checks are done so that if the prog
> >          * changes context in a wrong way it will be caught.
> >          */
> > -       err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
> > +       err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);
> 
> I think it is ok, but I need to go through the locking paths more.
> Andrey,
> please take a look as well.

Sorry for delay, I was offline for the last two weeks.

From the correctness perspective it looks fine to me.

From the performance perspective I can think of one relevant scenario.
Quite a common use-case in applications is to use bind(2) not before
listen(2) but before connect(2) for client sockets, so that the connection
can be set up from a specific source IP and, optionally, port.

The case of binding to both IP and port is not interesting since it's
already slow due to get_port().

But some applications do care about connection setup performance and at
the same time need to set the source IP only (no port). In this case they
use the IP_BIND_ADDRESS_NO_PORT socket option, which makes bind(2) fast
(Stanislav and I discussed this earlier in [0]).

I can imagine some pathological case where an application sets up tons of
connections with bind(2) before connect(2) for sockets with
IP_BIND_ADDRESS_NO_PORT enabled (which by itself requires setsockopt(2),
i.e. a socket lock/unlock), and the additional lock/unlock to run the
bind hook may add some overhead. Though I do not know how critical that
overhead may be and whether it's worth benchmarking or not (maybe too
much paranoia).

[0] https://lore.kernel.org/bpf/20200505182010.GB55644@rdna-mbp/
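
To make the scenario concrete, a minimal userspace sketch of that
bind-before-connect pattern (addresses, port and error handling are
illustrative only):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_from(const char *src_ip, const char *dst_ip, int dst_port)
{
	struct sockaddr_in src = {}, dst = {};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	/* Defer source port selection to connect() so that bind() stays
	 * cheap; only the source IP is pinned here.
	 */
	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));

	src.sin_family = AF_INET;
	inet_pton(AF_INET, src_ip, &src.sin_addr);	/* sin_port stays 0 */
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		goto err;

	dst.sin_family = AF_INET;
	dst.sin_port = htons(dst_port);
	inet_pton(AF_INET, dst_ip, &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		goto err;

	return fd;
err:
	close(fd);
	return -1;
}

With this patch, such a bind(2) takes the socket lock one extra time to run
the bind hook, which is the overhead being weighed here.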

> >         if (err)
> >                 return err;
> >
> > diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
> > index e648fbebb167..a7e3d170af51 100644
> > --- a/net/ipv6/af_inet6.c
> > +++ b/net/ipv6/af_inet6.c
> > @@ -451,7 +451,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
> >         /* BPF prog is run before any checks are done so that if the prog
> >          * changes context in a wrong way it will be caught.
> >          */
> > -       err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
> > +       err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
> >         if (err)
> >                 return err;
> >
> > --
> > 2.29.2.299.gdc1121823c-goog
> >
Stanislav Fomichev Nov. 30, 2020, 4:38 p.m. UTC | #3
On 11/29, Andrey Ignatov wrote:
> Alexei Starovoitov <alexei.starovoitov@gmail.com> [Tue, 2020-11-17 20:05  
> -0800]:
> > On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com>  
> wrote:
[..]
> >
> > I think it is ok, but I need to go through the locking paths more.
> > Andrey,
> > please take a look as well.

> Sorry for delay, I was offline for the last two weeks.
No worries, I was OOO myself last week, thanks for the feedback!

>  From the correctness perspective it looks fine to me.

>  From the performance perspective I can think of one relevant scenario.
> Quite common use-case in applications is to use bind(2) not before
> listen(2) but before connect(2) for client sockets so that connection
> can be set up from specific source IP and, optionally, port.

> Binding to both IP and port case is not interesting since it's already
> slow due to get_port().

> But some applications do care about connection setup performance and at
> the same time need to set source IP only (no port). In this case they
> use IP_BIND_ADDRESS_NO_PORT socket option, what makes bind(2) fast
> (we've discussed it with Stanislav earlier in [0]).

> I can imagine some pathological case when an application sets up tons of
> connections with bind(2) before connect(2) for sockets with
> IP_BIND_ADDRESS_NO_PORT enabled (that by itself requires setsockopt(2)
> though, i.e. socket lock/unlock) and that another lock/unlock to run
> bind hook may add some overhead. Though I do not know how critical that
> overhead may be and whether it's worth to benchmark or not (maybe too
> much paranoia).

> [0] https://lore.kernel.org/bpf/20200505182010.GB55644@rdna-mbp/
Even in the case of IP_BIND_ADDRESS_NO_PORT, inet[6]_bind() does
lock_sock() down the line, so it's not like we are switching
a lockless path to one with a lock, right?

And in this case, similar to listen, the socket is still uncontended and
owned by userspace. So that extra lock/unlock should be cheap
enough to be ignored (spin_lock_bh on a warm cache line).

Am I missing something?
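
For reference, in the uncontended case lock_sock() roughly boils down to the
following (simplified sketch, not the verbatim net/core/sock.c code):

void lock_sock(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);	/* wait only if someone else owns it */
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	local_bh_enable();
}

For a socket still owned only by the calling thread this is a single
spin_lock_bh()/spin_unlock() pair on sk_lock, which is the "cheap enough"
case discussed above.
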
Andrey Ignatov Nov. 30, 2020, 11:02 p.m. UTC | #4
sdf@google.com <sdf@google.com> [Mon, 2020-11-30 08:38 -0800]:
> On 11/29, Andrey Ignatov wrote:
> > Alexei Starovoitov <alexei.starovoitov@gmail.com> [Tue, 2020-11-17 20:05
> > -0800]:
> > > On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com>
> > wrote:
> [..]
> > >
> > > I think it is ok, but I need to go through the locking paths more.
> > > Andrey,
> > > please take a look as well.
> 
> > Sorry for delay, I was offline for the last two weeks.
> No worries, I was OOO myself last week, thanks for the feedback!
> 
> >  From the correctness perspective it looks fine to me.
> 
> >  From the performance perspective I can think of one relevant scenario.
> > Quite common use-case in applications is to use bind(2) not before
> > listen(2) but before connect(2) for client sockets so that connection
> > can be set up from specific source IP and, optionally, port.
> 
> > Binding to both IP and port case is not interesting since it's already
> > slow due to get_port().
> 
> > But some applications do care about connection setup performance and at
> > the same time need to set source IP only (no port). In this case they
> > use IP_BIND_ADDRESS_NO_PORT socket option, what makes bind(2) fast
> > (we've discussed it with Stanislav earlier in [0]).
> 
> > I can imagine some pathological case when an application sets up tons of
> > connections with bind(2) before connect(2) for sockets with
> > IP_BIND_ADDRESS_NO_PORT enabled (that by itself requires setsockopt(2)
> > though, i.e. socket lock/unlock) and that another lock/unlock to run
> > bind hook may add some overhead. Though I do not know how critical that
> > overhead may be and whether it's worth to benchmark or not (maybe too
> > much paranoia).
> 
> > [0] https://lore.kernel.org/bpf/20200505182010.GB55644@rdna-mbp/
> Even in case of IP_BIND_ADDRESS_NO_PORT, inet[6]_bind() does
> lock_sock down the line, so it's not like we are switching
> a lockless path to the one with the lock, right?

Right, I understand that it's going from one lock/unlock to two (not
from zero to one); that's what I meant by "another". My point was about
this one extra lock.

> And in this case, similar to listen, the socket is still uncontended and
> owned by the userspace. So that extra lock/unlock should be cheap
> enough to be ignored (spin_lock_bh on the warm cache line).
> 
> Am I missing something?

As I mentioned, it may come up only in a "pathological case", which is
probably fine to ignore; i.e. I'd rather agree with "cheap enough to be
ignored", and a benchmark would likely confirm it. I just couldn't say that
for sure w/o numbers, so I brought up this point.

Given that we both agree that it should be fine to ignore this +1 lock,
IMO it should be good to go unless someone else has objections.
Stanislav Fomichev Dec. 1, 2020, 6:43 p.m. UTC | #5
On 11/30, Andrey Ignatov wrote:
> sdf@google.com <sdf@google.com> [Mon, 2020-11-30 08:38 -0800]:
> > On 11/29, Andrey Ignatov wrote:
> > > Alexei Starovoitov <alexei.starovoitov@gmail.com> [Tue, 2020-11-17  
> 20:05
> > > -0800]:
> > > > On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com>
> > > wrote:
> > [..]
> > > >
> > > > I think it is ok, but I need to go through the locking paths more.
> > > > Andrey,
> > > > please take a look as well.
> >
> > > Sorry for delay, I was offline for the last two weeks.
> > No worries, I was OOO myself last week, thanks for the feedback!
> >
> > >  From the correctness perspective it looks fine to me.
> >
> > >  From the performance perspective I can think of one relevant  
> scenario.
> > > Quite common use-case in applications is to use bind(2) not before
> > > listen(2) but before connect(2) for client sockets so that connection
> > > can be set up from specific source IP and, optionally, port.
> >
> > > Binding to both IP and port case is not interesting since it's already
> > > slow due to get_port().
> >
> > > But some applications do care about connection setup performance and  
> at
> > > the same time need to set source IP only (no port). In this case they
> > > use IP_BIND_ADDRESS_NO_PORT socket option, what makes bind(2) fast
> > > (we've discussed it with Stanislav earlier in [0]).
> >
> > > I can imagine some pathological case when an application sets up tons  
> of
> > > connections with bind(2) before connect(2) for sockets with
> > > IP_BIND_ADDRESS_NO_PORT enabled (that by itself requires setsockopt(2)
> > > though, i.e. socket lock/unlock) and that another lock/unlock to run
> > > bind hook may add some overhead. Though I do not know how critical  
> that
> > > overhead may be and whether it's worth to benchmark or not (maybe too
> > > much paranoia).
> >
> > > [0] https://lore.kernel.org/bpf/20200505182010.GB55644@rdna-mbp/
> > Even in case of IP_BIND_ADDRESS_NO_PORT, inet[6]_bind() does
> > lock_sock down the line, so it's not like we are switching
> > a lockless path to the one with the lock, right?

> Right, I understand that it's going from one lock/unlock to two (not
> from zero to one), that's what I meant by "another". My point was about
> this one more lock.

> > And in this case, similar to listen, the socket is still uncontended and
> > owned by the userspace. So that extra lock/unlock should be cheap
> > enough to be ignored (spin_lock_bh on the warm cache line).
> >
> > Am I missing something?

> As I mentioned it may come up only in "pathological case" what is
> probably fine to ignore, i.e. I'd rather agree with "cheap enough to be
> ignored" and benchmark would likely confirm it, I just couldn't say that
> for sure w/o numbers so brought this point.

> Given that we both agree that it should be fine to ignore this +1 lock,
> IMO it should be good to go unless someone else has objections.
Thanks, agreed. Do you mind giving it an Acked-by so it gets some
attention in patchwork? ;-)
Andrey Ignatov Dec. 1, 2020, 7:21 p.m. UTC | #6
Stanislav Fomichev <sdf@google.com> [Tue, 2020-11-17 16:18 -0800]:
> I have to now lock/unlock socket for the bind hook execution.
> That shouldn't cause any overhead because the socket is unbound
> and shouldn't receive any traffic.
> 
> Signed-off-by: Stanislav Fomichev <sdf@google.com>

Acked-by: Andrey Ignatov <rdna@fb.com>

> ---
>  include/linux/bpf-cgroup.h | 12 ++++++------
>  net/core/filter.c          |  4 ++++
>  net/ipv4/af_inet.c         |  2 +-
>  net/ipv6/af_inet6.c        |  2 +-
>  4 files changed, 12 insertions(+), 8 deletions(-)
> 
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> index ed71bd1a0825..72e69a0e1e8c 100644
> --- a/include/linux/bpf-cgroup.h
> +++ b/include/linux/bpf-cgroup.h
> @@ -246,11 +246,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
>  	__ret;								       \
>  })
>  
> -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
> -	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
> +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr)			       \
> +	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
>  
> -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
> -	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
> +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr)			       \
> +	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
>  
>  #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
>  					    sk->sk_prot->pre_connect)
> @@ -434,8 +434,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
>  #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
> -#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
> -#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
> +#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
> +#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
>  #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 2ca5eecebacf..21d91dcf0260 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -6995,6 +6995,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  		return &bpf_sk_storage_delete_proto;
>  	case BPF_FUNC_setsockopt:
>  		switch (prog->expected_attach_type) {
> +		case BPF_CGROUP_INET4_BIND:
> +		case BPF_CGROUP_INET6_BIND:
>  		case BPF_CGROUP_INET4_CONNECT:
>  		case BPF_CGROUP_INET6_CONNECT:
>  			return &bpf_sock_addr_setsockopt_proto;
> @@ -7003,6 +7005,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  		}
>  	case BPF_FUNC_getsockopt:
>  		switch (prog->expected_attach_type) {
> +		case BPF_CGROUP_INET4_BIND:
> +		case BPF_CGROUP_INET6_BIND:
>  		case BPF_CGROUP_INET4_CONNECT:
>  		case BPF_CGROUP_INET6_CONNECT:
>  			return &bpf_sock_addr_getsockopt_proto;
> diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
> index b7260c8cef2e..b94fa8eb831b 100644
> --- a/net/ipv4/af_inet.c
> +++ b/net/ipv4/af_inet.c
> @@ -450,7 +450,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
>  	/* BPF prog is run before any checks are done so that if the prog
>  	 * changes context in a wrong way it will be caught.
>  	 */
> -	err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
> +	err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);
>  	if (err)
>  		return err;
>  
> diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
> index e648fbebb167..a7e3d170af51 100644
> --- a/net/ipv6/af_inet6.c
> +++ b/net/ipv6/af_inet6.c
> @@ -451,7 +451,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
>  	/* BPF prog is run before any checks are done so that if the prog
>  	 * changes context in a wrong way it will be caught.
>  	 */
> -	err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
> +	err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
>  	if (err)
>  		return err;
>  
> -- 
> 2.29.2.299.gdc1121823c-goog
>
Andrey Ignatov Dec. 1, 2020, 7:22 p.m. UTC | #7
sdf@google.com <sdf@google.com> [Tue, 2020-12-01 10:43 -0800]:
> On 11/30, Andrey Ignatov wrote:
> > sdf@google.com <sdf@google.com> [Mon, 2020-11-30 08:38 -0800]:
> > > On 11/29, Andrey Ignatov wrote:
> > > > Alexei Starovoitov <alexei.starovoitov@gmail.com> [Tue, 2020-11-17
> > 20:05
> > > > -0800]:
> > > > > On Tue, Nov 17, 2020 at 4:17 PM Stanislav Fomichev <sdf@google.com>
> > > > wrote:
> > > [..]
> > > > >
> > > > > I think it is ok, but I need to go through the locking paths more.
> > > > > Andrey,
> > > > > please take a look as well.
> > >
> > > > Sorry for delay, I was offline for the last two weeks.
> > > No worries, I was OOO myself last week, thanks for the feedback!
> > >
> > > >  From the correctness perspective it looks fine to me.
> > >
> > > >  From the performance perspective I can think of one relevant
> > scenario.
> > > > Quite common use-case in applications is to use bind(2) not before
> > > > listen(2) but before connect(2) for client sockets so that connection
> > > > can be set up from specific source IP and, optionally, port.
> > >
> > > > Binding to both IP and port case is not interesting since it's already
> > > > slow due to get_port().
> > >
> > > > But some applications do care about connection setup performance and
> > at
> > > > the same time need to set source IP only (no port). In this case they
> > > > use IP_BIND_ADDRESS_NO_PORT socket option, what makes bind(2) fast
> > > > (we've discussed it with Stanislav earlier in [0]).
> > >
> > > > I can imagine some pathological case when an application sets up
> > tons of
> > > > connections with bind(2) before connect(2) for sockets with
> > > > IP_BIND_ADDRESS_NO_PORT enabled (that by itself requires setsockopt(2)
> > > > though, i.e. socket lock/unlock) and that another lock/unlock to run
> > > > bind hook may add some overhead. Though I do not know how critical
> > that
> > > > overhead may be and whether it's worth to benchmark or not (maybe too
> > > > much paranoia).
> > >
> > > > [0] https://lore.kernel.org/bpf/20200505182010.GB55644@rdna-mbp/
> > > Even in case of IP_BIND_ADDRESS_NO_PORT, inet[6]_bind() does
> > > lock_sock down the line, so it's not like we are switching
> > > a lockless path to the one with the lock, right?
> 
> > Right, I understand that it's going from one lock/unlock to two (not
> > from zero to one), that's what I meant by "another". My point was about
> > this one more lock.
> 
> > > And in this case, similar to listen, the socket is still uncontended and
> > > owned by the userspace. So that extra lock/unlock should be cheap
> > > enough to be ignored (spin_lock_bh on the warm cache line).
> > >
> > > Am I missing something?
> 
> > As I mentioned it may come up only in "pathological case" what is
> > probably fine to ignore, i.e. I'd rather agree with "cheap enough to be
> > ignored" and benchmark would likely confirm it, I just couldn't say that
> > for sure w/o numbers so brought this point.
> 
> > Given that we both agree that it should be fine to ignore this +1 lock,
> > IMO it should be good to go unless someone else has objections.
> Thanks, agreed. Do you mind giving it an acked-by so it gets some
> attention in the patchwork? ;-)

Sure. Acked this one.

Patch

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index ed71bd1a0825..72e69a0e1e8c 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -246,11 +246,11 @@  int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 	__ret;								       \
 })
 
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
-	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
+#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr)			       \
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
 
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
-	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
+#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr)			       \
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
 
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
 					    sk->sk_prot->pre_connect)
@@ -434,8 +434,8 @@  static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ca5eecebacf..21d91dcf0260 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6995,6 +6995,8 @@  sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_storage_delete_proto;
 	case BPF_FUNC_setsockopt:
 		switch (prog->expected_attach_type) {
+		case BPF_CGROUP_INET4_BIND:
+		case BPF_CGROUP_INET6_BIND:
 		case BPF_CGROUP_INET4_CONNECT:
 		case BPF_CGROUP_INET6_CONNECT:
 			return &bpf_sock_addr_setsockopt_proto;
@@ -7003,6 +7005,8 @@  sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		}
 	case BPF_FUNC_getsockopt:
 		switch (prog->expected_attach_type) {
+		case BPF_CGROUP_INET4_BIND:
+		case BPF_CGROUP_INET6_BIND:
 		case BPF_CGROUP_INET4_CONNECT:
 		case BPF_CGROUP_INET6_CONNECT:
 			return &bpf_sock_addr_getsockopt_proto;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b7260c8cef2e..b94fa8eb831b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -450,7 +450,7 @@  int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* BPF prog is run before any checks are done so that if the prog
 	 * changes context in a wrong way it will be caught.
 	 */
-	err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
+	err = BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr);
 	if (err)
 		return err;
 
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e648fbebb167..a7e3d170af51 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -451,7 +451,7 @@  int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* BPF prog is run before any checks are done so that if the prog
 	 * changes context in a wrong way it will be caught.
 	 */
-	err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
+	err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
 	if (err)
 		return err;