@@ -9871,10 +9871,29 @@ struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
{
struct sk_reuseport_kern reuse_kern;
enum sk_action action;
+ bool allocated = false;
+
+ if (migration) {
+ /* cancel migration for a possibly incapable eBPF program */
+ if (prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)
+ return ERR_PTR(-ENOTSUPP);
+
+ if (!skb) {
+ allocated = true;
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+ }
+ } else if (!skb) {
+ return NULL; /* fall back to select by hash */
+ }
bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash, migration);
action = BPF_PROG_RUN(prog, &reuse_kern);
+ if (allocated)
+ kfree_skb(skb);
+
if (action == SK_PASS)
return reuse_kern.selected_sk;
else
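
This hunk (bpf_run_sk_reuseport() lives in net/core/filter.c) teaches the runner about migration: a program attached with an attach type other than BPF_SK_REUSEPORT_SELECT_OR_MIGRATE cancels the migration via ERR_PTR(-ENOTSUPP), and a zero-size dummy skb is allocated (and freed after the run) so the program can execute when no packet is in flight. For reference, a minimal sketch of a program meant to be attached as BPF_SK_REUSEPORT_SELECT_OR_MIGRATE; the SEC() name, the sk_reuseport_md->migration field, and the map layout are assumptions based on this series, not verbatim selftest code:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} target_map SEC(".maps");

SEC("sk_reuseport/migrate")
int select_or_migrate(struct sk_reuseport_md *md)
{
	__u32 key = 0;

	if (md->migration) {
		/* Migration request: pick the target listener from the
		 * map; SK_DROP cancels the migration (-ECONNREFUSED). */
		if (bpf_sk_select_reuseport(md, &target_map, &key, 0))
			return SK_DROP;
		return SK_PASS;
	}

	/* Normal lookup: SK_PASS with no socket selected falls back to
	 * selection by hash. */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
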
@@ -247,8 +247,15 @@ struct sock *reuseport_detach_sock(struct sock *sk)
prog = rcu_dereference(reuse->prog);
if (sk->sk_protocol == IPPROTO_TCP) {
- if (reuse->num_socks && !prog)
- nsk = i == reuse->num_socks ? reuse->socks[i - 1] : reuse->socks[i];
+ if (reuse->num_socks) {
+ if (prog)
+ nsk = bpf_run_sk_reuseport(reuse, sk, prog, NULL, 0,
+ BPF_SK_REUSEPORT_MIGRATE_QUEUE);
+
+ if (!nsk)
+ nsk = i == reuse->num_socks ?
+ reuse->socks[i - 1] : reuse->socks[i];
+ }
reuse->num_closed_socks++;
reuse->socks[reuse->max_socks - reuse->num_closed_socks] = sk;
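
In reuseport_detach_sock() (net/core/sock_reuseport.c), a closing TCP listener now offers the attached program first pick of the migration target: bpf_run_sk_reuseport() runs with no skb and BPF_SK_REUSEPORT_MIGRATE_QUEUE, and the positional fallback (the neighbouring socket in the array) is used only when the program selects nothing (NULL). An ERR_PTR return deliberately bypasses the fallback so the error propagates to the caller and cancels migration. A hedged userspace sketch of loading and attaching such a program follows; BPF_SK_REUSEPORT_SELECT_OR_MIGRATE is the attach type this series adds, the object path and program name are hypothetical, the rest is stock libbpf plus SO_ATTACH_REUSEPORT_EBPF:

#include <sys/socket.h>
#include <bpf/libbpf.h>

static int attach_migrate_prog(int listener_fd, const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int prog_fd;

	obj = bpf_object__open(obj_path);
	if (libbpf_get_error(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "select_or_migrate");
	if (!prog)
		goto err;

	/* Must be set before load so the verifier checks the program
	 * against the new attach type. */
	bpf_program__set_expected_attach_type(prog,
					      BPF_SK_REUSEPORT_SELECT_OR_MIGRATE);

	if (bpf_object__load(obj))
		goto err;

	prog_fd = bpf_program__fd(prog);

	/* One setsockopt() attaches the program to the whole reuseport
	 * group that listener_fd belongs to. */
	if (setsockopt(listener_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
		       &prog_fd, sizeof(prog_fd)))
		goto err;

	return 0;
err:
	bpf_object__close(obj);
	return -1;
}
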
@@ -342,15 +349,9 @@ struct sock *__reuseport_select_sock(struct sock *sk, u32 hash,
if (!prog)
goto select_by_hash;
- if (migration)
- goto out;
-
- if (!skb)
- goto select_by_hash;
-
if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash, migration);
- else
+ else if (skb)
sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
select_by_hash:
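
With NULL-skb handling and migration support moved into bpf_run_sk_reuseport(), the early `goto out` (which used to cancel migration) and the NULL-skb bail-out are dropped from __reuseport_select_sock(), leaving a two-way dispatch. The resulting branch reads as follows (reconstructed from the hunk above, comments added):

	if (!prog)
		goto select_by_hash;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		/* Covers both lookup (skb != NULL) and migration (skb may
		 * be NULL): bpf_run_sk_reuseport() allocates a dummy skb
		 * itself when needed. */
		sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash, migration);
	else if (skb)
		/* Classic BPF socket filters still require a real packet. */
		sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);
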
@@ -699,7 +699,7 @@ void inet_unhash(struct sock *sk)
if (rcu_access_pointer(sk->sk_reuseport_cb)) {
nsk = reuseport_detach_sock(sk);
- if (nsk)
+ if (!IS_ERR_OR_NULL(nsk))
inet_csk_reqsk_queue_migrate(sk, nsk);
}
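
Because reuseport_detach_sock() can now hand back error pointers from the eBPF program as well as NULL, inet_unhash() (net/ipv4/inet_hashtables.c) must treat both as "do not migrate". A small illustrative helper spelling out that contract; IS_ERR_OR_NULL() is the stock <linux/err.h> predicate and inet_csk_reqsk_queue_migrate() is introduced earlier in this series, while the wrapper itself is hypothetical:

#include <linux/err.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>

static void reqsk_queue_maybe_migrate(struct sock *sk, struct sock *nsk)
{
	/* nsk is one of:
	 *  - a valid listener picked by the eBPF program or by position,
	 *  - NULL (no socket left to migrate to),
	 *  - ERR_PTR(-ENOTSUPP/-ENOMEM/-ECONNREFUSED), i.e. migration
	 *    was cancelled.
	 * IS_ERR_OR_NULL() filters out the last two in a single test.
	 */
	if (!IS_ERR_OR_NULL(nsk))
		inet_csk_reqsk_queue_migrate(sk, nsk);
}
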