Message ID | 20220923224518.2353383-1-kafai@fb.com (mailing list archive)
---|---
State | Superseded
Delegated to: | BPF
Series | bpf: Remove recursion check for struct_ops prog
On Fri, Sep 23, 2022 at 3:48 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> From: Martin KaFai Lau <martin.lau@kernel.org>
>
> When a bad bpf prog '.init' calls
> bpf_setsockopt(TCP_CONGESTION, "itself"), it will trigger this loop:
>
> .init => bpf_setsockopt(tcp_cc) => .init => bpf_setsockopt(tcp_cc) ...
> ... => .init => bpf_setsockopt(tcp_cc).
>
> This was previously prevented by the prog->active counter, but the
> prog->active detection cannot be used in struct_ops, as explained in
> the earlier patch of the set.
>
> In this patch, the second bpf_setsockopt(tcp_cc) is not allowed, in
> order to break the loop. This is done by using a bit of an existing
> one-byte hole in tcp_sock to check if there is an on-going
> bpf_setsockopt(TCP_CONGESTION) on this tcp_sock.
>
> Note that this essentially means only the first '.init' can call
> bpf_setsockopt(TCP_CONGESTION) to pick a fallback cc (eg. the peer
> does not support ECN), and the second '.init' cannot fall back to
> another cc. This applies even when the second
> bpf_setsockopt(TCP_CONGESTION) would not cause a loop.
>
> Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
> ---
>  include/linux/tcp.h | 6 ++++++
>  net/core/filter.c   | 28 +++++++++++++++++++++++++++-
>  2 files changed, 33 insertions(+), 1 deletion(-)
>
> [...]

Eric,

Could you please ack this patch?
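To make the failure mode concrete, here is a minimal sketch of the kind of
misbehaving struct_ops prog the commit message describes. This is not from
the patch set: the "bad_cc" name and the selftest-style boilerplate are
illustrative, and a registrable tcp_congestion_ops would need more ops
(.ssthresh, .cong_avoid, .undo_cwnd, ...) than shown.

/* bad_cc.bpf.c: a bpf tcp-cc whose .init sets the cc to itself.
 * Loading it and doing setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 * "bad_cc", ...) used to recurse:
 *   .init => bpf_setsockopt(tcp_cc) => .init => ...
 * With this patch, the nested bpf_setsockopt() returns -EBUSY.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define SOL_TCP		6	/* not in vmlinux.h; uapi values */
#define TCP_CONGESTION	13

char _license[] SEC("license") = "GPL";

SEC("struct_ops/bad_cc_init")
void BPF_PROG(bad_cc_init, struct sock *sk)
{
	char cc[] = "bad_cc";

	/* Re-selects this very cc; tcp_set_congestion_control() will
	 * call bad_cc_init() again from inside this helper call.
	 */
	bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc));
}

SEC(".struct_ops")
struct tcp_congestion_ops bad_cc = {
	.init = (void *)bad_cc_init,
	.name = "bad_cc",
};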
Eric,

Ping! This is an important fix for anyone using bpf-based tcp-cc.

On Mon, Sep 26, 2022 at 8:34 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Fri, Sep 23, 2022 at 3:48 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> > [...]
>
> Eric,
>
> Could you please ack this patch?
On Fri, Sep 23, 2022 at 3:48 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> From: Martin KaFai Lau <martin.lau@kernel.org>
>
> [...]
>
> +       tp = tcp_sk(sk);
> +       if (tp->bpf_chg_cc_inprogress)
> +               return -EBUSY;
> +

Is the socket locked (and owned by current thread) at this point ?
If not, changing bpf_chg_cc_inprogress would be racy.

> [...]
On 9/28/22 7:04 PM, Eric Dumazet wrote:
> On Fri, Sep 23, 2022 at 3:48 PM Martin KaFai Lau <kafai@fb.com> wrote:
>>
>> [...]
>>
>> +       tp = tcp_sk(sk);
>> +       if (tp->bpf_chg_cc_inprogress)
>> +               return -EBUSY;
>> +
>
> Is the socket locked (and owned by current thread) at this point ?
> If not, changing bpf_chg_cc_inprogress would be racy.

Yes, the socket is locked and owned. There is a sock_owned_by_me check
earlier in _bpf_setsockopt().
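For reference, the check Martin mentions sits at the top of the bpf
setsockopt path in net/core/filter.c. As of this series it looks roughly
like the following (a paraphrase from memory, not a verbatim copy of the
tree):

static int _bpf_setsockopt(struct sock *sk, int level, int optname,
			   char *optval, int optlen)
{
	/* bpf progs must call this with the socket lock owned;
	 * request_sock/timewait socks are not full socks and have
	 * no lock to assert.
	 */
	if (sk_fullsock(sk))
		sock_owned_by_me(sk);

	return __bpf_setsockopt(sk, level, optname, optval, optlen);
}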
On Wed, Sep 28, 2022 at 10:31 PM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>
> On 9/28/22 7:04 PM, Eric Dumazet wrote:
> >
> > [...]
> >
> > Is the socket locked (and owned by current thread) at this point ?
> > If not, changing bpf_chg_cc_inprogress would be racy.
>
> Yes, the socket is locked and owned. There is a sock_owned_by_me check
> earlier in _bpf_setsockopt().

Good to know. Note that a listener can be cloned without the socket
lock being held.

In order to avoid surprises, I would clear bpf_chg_cc_inprogress in
tcp_create_openreq_child().
On 9/28/22 10:37 PM, Eric Dumazet wrote:
> On Wed, Sep 28, 2022 at 10:31 PM Martin KaFai Lau <martin.lau@linux.dev> wrote:
>>
>> [...]
>>
>> Yes, the socket is locked and owned. There is a sock_owned_by_me check
>> earlier in _bpf_setsockopt().
>
> Good to know. Note that a listener can be cloned without the socket
> lock being held.
>
> In order to avoid surprises, I would clear bpf_chg_cc_inprogress in
> tcp_create_openreq_child()

Ah, makes sense. I will re-spin.
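The re-spin would presumably add a hunk along these lines to
net/ipv4/tcp_minisocks.c (a sketch, not the actual v2; the exact context
inside tcp_create_openreq_child() may differ, and the #ifdef follows from
the new tcp_sock bit living under CONFIG_BPF):

--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ ... @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newtp = tcp_sk(newsk);
+#ifdef CONFIG_BPF
+	/* The listener can be cloned without the socket lock being
+	 * held, so the child must not inherit a bpf_chg_cc_inprogress
+	 * that was transiently set on the listener.
+	 */
+	newtp->bpf_chg_cc_inprogress = 0;
+#endif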
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a9fbe22732c3..3bdf687e2fb3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -388,6 +388,12 @@ struct tcp_sock {
 	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs
 					 * values defined in uapi/linux/tcp.h
 					 */
+	u8	bpf_chg_cc_inprogress:1; /* In the middle of
+					  * bpf_setsockopt(TCP_CONGESTION),
+					  * it is to avoid the bpf_tcp_cc->init()
+					  * to recur itself by calling
+					  * bpf_setsockopt(TCP_CONGESTION, "itself").
+					  */
 #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
 #else
 #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
diff --git a/net/core/filter.c b/net/core/filter.c
index 96f2f7a65e65..ac4c45c02da5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5105,6 +5105,9 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
 static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval,
 				      int *optlen, bool getopt)
 {
+	struct tcp_sock *tp;
+	int ret;
+
 	if (*optlen < 2)
 		return -EINVAL;
 
@@ -5125,8 +5128,31 @@ static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval,
 	if (*optlen >= sizeof("cdg") - 1 && !strncmp("cdg", optval, *optlen))
 		return -ENOTSUPP;
 
-	return do_tcp_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+	/* It stops this looping
+	 *
+	 * .init => bpf_setsockopt(tcp_cc) => .init =>
+	 * bpf_setsockopt(tcp_cc) => .init => ....
+	 *
+	 * The second bpf_setsockopt(tcp_cc) is not allowed
+	 * in order to break the loop when both .init
+	 * are the same bpf prog.
+	 *
+	 * This applies even when the second bpf_setsockopt(tcp_cc)
+	 * does not cause a loop. This means only the first
+	 * '.init' can call bpf_setsockopt(TCP_CONGESTION) to
+	 * pick a fallback cc (eg. peer does not support ECN)
+	 * and the second '.init' cannot fall back to
+	 * another.
+	 */
+	tp = tcp_sk(sk);
+	if (tp->bpf_chg_cc_inprogress)
+		return -EBUSY;
+
+	tp->bpf_chg_cc_inprogress = 1;
+	ret = do_tcp_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
 				 KERNEL_SOCKPTR(optval), *optlen);
+	tp->bpf_chg_cc_inprogress = 0;
+	return ret;
 }
 
 static int sol_tcp_sockopt(struct sock *sk, int optname,
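For contrast with the "bad_cc" sketch above, this is the legitimate
pattern the new guard still allows, in the spirit of the bpf_dctcp
selftest's ECN fallback. Again a sketch: the prog name, the tcp_sock
cast, and the #define values are illustrative, not taken from the
patch set.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define SOL_TCP		6	/* uapi values, for illustration */
#define TCP_CONGESTION	13
#define TCP_ECN_OK	1	/* include/net/tcp.h */

char _license[] SEC("license") = "GPL";

SEC("struct_ops/my_ecn_cc_init")
void BPF_PROG(my_ecn_cc_init, struct sock *sk)
{
	struct tcp_sock *tp = (struct tcp_sock *)sk;

	if (!(tp->ecn_flags & TCP_ECN_OK)) {
		char fallback[] = "cubic";

		/* If my_ecn_cc was installed by a regular setsockopt(),
		 * bpf_chg_cc_inprogress is 0 here and this first (and
		 * only permitted) nested cc switch succeeds.  If this
		 * '.init' is instead reached from within another
		 * bpf_setsockopt(TCP_CONGESTION), the flag is already
		 * 1 and the call returns -EBUSY.
		 */
		bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       fallback, sizeof(fallback));
	}
}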