--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1705,12 +1705,16 @@ extern struct static_key_false_deferred tcp_md5_needed;
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                            const union tcp_md5_addr *addr,
                                            int family);
+
+#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
 
 static inline struct tcp_md5sig_key *
 tcp_md5_do_lookup(const struct sock *sk, int l3index,
                   const union tcp_md5_addr *addr, int family)
 {
         if (!static_branch_unlikely(&tcp_md5_needed.key))
                 return NULL;
+        if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+                return tcp_twsk_md5_key(tcp_twsk(sk));
         return __tcp_md5_do_lookup(sk, l3index, addr, family);
 }
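
For reference, a minimal sketch of how the helper reads once the hunk above is applied (tcp_twsk() is the kernel's cast from struct sock to struct tcp_timewait_sock):

  #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)

  static inline struct tcp_md5sig_key *
  tcp_md5_do_lookup(const struct sock *sk, int l3index,
                    const union tcp_md5_addr *addr, int family)
  {
          /* Static branch: no MD5 keys are configured system-wide. */
          if (!static_branch_unlikely(&tcp_md5_needed.key))
                  return NULL;
          /* A timewait socket carries at most one key, copied into
           * tw_md5_key when the socket entered TIME_WAIT, so no
           * l3index/addr/family walk is needed for it.
           */
          if (unlikely(sk->sk_state == TCP_TIME_WAIT))
                  return tcp_twsk_md5_key(tcp_twsk(sk));
          return __tcp_md5_do_lookup(sk, l3index, addr, family);
  }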
@@ -1718,9 +1722,6 @@ enum skb_drop_reason
 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                      const void *saddr, const void *daddr,
                      int family, int dif, int sdif);
-
-
-#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
 #else
 static inline struct tcp_md5sig_key *
 tcp_md5_do_lookup(const struct sock *sk, int l3index,
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -676,7 +676,7 @@ static bool tcp_v4_md5_sign_reset(struct net *net, const struct sock *sk,
                 return !!hash_location;
 
         rcu_read_lock();
-        if (sk && sk_fullsock(sk)) {
+        if (sk && sk->sk_state != TCP_NEW_SYN_RECV) {
                 const union tcp_md5_addr *addr;
                 int l3index;
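
The point of this one-liner: sk_fullsock() rejects both TCP_NEW_SYN_RECV and TCP_TIME_WAIT minisockets, so the reset-signing path never looked up a key for a twsk. Checking only for TCP_NEW_SYN_RECV lets timewait sockets through, and tcp_md5_do_lookup() now serves them from tw_md5_key. For reference, sk_fullsock() as defined in include/net/sock.h:

  static inline bool sk_fullsock(const struct sock *sk)
  {
          /* Request (TCP_NEW_SYN_RECV) and timewait minisockets only
           * implement a subset of struct sock; everything else is "full".
           */
          return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
  }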
@@ -2195,8 +2195,13 @@ int tcp_v4_rcv(struct sk_buff *skb)
         goto discard_it;
 
 do_time_wait:
-        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+        else
+                drop_reason = tcp_inbound_md5_hash(sk, skb,
+                                                   &iph->saddr, &iph->daddr,
+                                                   AF_INET, dif, sdif);
+        if (drop_reason) {
                 inet_twsk_put(inet_twsk(sk));
                 goto discard_it;
         }
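
A merged view of the IPv4 receive path with this hunk applied (minimal sketch; the IPv6 hunk below mirrors it): the XFRM and MD5 checks now share one error path, so an unverified segment releases the twsk reference and is discarded before tcp_timewait_state_process() gets a chance to change state or reply.

  do_time_wait:
          if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                  drop_reason = SKB_DROP_REASON_XFRM_POLICY;
          else
                  /* Returns SKB_NOT_DROPPED_YET (0) when the segment is
                   * acceptable, or an SKB_DROP_REASON_TCP_MD5* value when
                   * the signature is absent, unexpected, or wrong.
                   */
                  drop_reason = tcp_inbound_md5_hash(sk, skb,
                                                     &iph->saddr, &iph->daddr,
                                                     AF_INET, dif, sdif);
          if (drop_reason) {
                  inet_twsk_put(inet_twsk(sk));
                  goto discard_it;
          }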
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -987,7 +987,7 @@ static int tcp_v6_md5_lookup_reset_key(struct net *net, const struct sock *sk,
         if (!static_branch_unlikely(&tcp_md5_needed.key))
                 return !!hash_location;
 
-        if (sk && sk_fullsock(sk)) {
+        if (sk && sk->sk_state != TCP_NEW_SYN_RECV) {
                 /* sdif set, means packet ingressed via a device
                  * in an L3 domain and inet_iif is set to it.
                  */
@@ -1795,8 +1795,14 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
         goto discard_it;
 
 do_time_wait:
-        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+        else
+                drop_reason = tcp_inbound_md5_hash(sk, skb,
+                                                   &hdr->saddr, &hdr->daddr,
+                                                   AF_INET6, dif, sdif);
+
+        if (drop_reason) {
                 inet_twsk_put(inet_twsk(sk));
                 goto discard_it;
         }
It seems rare for BGP to have a twsk socket, and quite unlikely on the
server side; in addition, I don't see any major concern with unsigned
segments destroying a twsk early. On the other hand, it seems better
not to change TCP state in response to unsigned inbound segments, and
fixing this does not look hard. So, let's avoid replying or making any
TCP state changes as long as the segments haven't been verified.

Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 include/net/tcp.h   |  7 ++++---
 net/ipv4/tcp_ipv4.c |  9 +++++++--
 net/ipv6/tcp_ipv6.c | 10 ++++++++--
 3 files changed, 19 insertions(+), 7 deletions(-)