@@ -583,6 +583,15 @@ bpf_iter_mptcp_subflow_next(struct bpf_iter_mptcp_subflow *it) __weak __ksym;
extern void
bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it) __weak __ksym;

+struct bpf_iter_mptcp_subflow_sched;
+extern int bpf_iter_mptcp_subflow_sched_new(struct bpf_iter_mptcp_subflow_sched *it,
+					    struct sock *sk,
+					    struct mptcp_sched_data *data) __weak __ksym;
+extern struct mptcp_subflow_context *
+bpf_iter_mptcp_subflow_sched_next(struct bpf_iter_mptcp_subflow_sched *it) __weak __ksym;
+extern void
+bpf_iter_mptcp_subflow_sched_destroy(struct bpf_iter_mptcp_subflow_sched *it) __weak __ksym;
+
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
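
For reference, a bpf_for_each(mptcp_subflow_sched, ...) loop like the one in the
next hunk is just the usual open-coded-iterator wrapper around this
new/next/destroy trio. A minimal sketch of the expanded form (the helper name
walk_subflows_sched is illustrative and not part of the patch; only the three
kfunc calls come from the declarations above):

static int walk_subflows_sched(struct sock *sk, struct mptcp_sched_data *data)
{
	struct bpf_iter_mptcp_subflow_sched it;
	struct mptcp_subflow_context *subflow;

	/* If _new() fails, _next() below simply returns NULL on the first call. */
	bpf_iter_mptcp_subflow_sched_new(&it, sk, data);

	while ((subflow = bpf_iter_mptcp_subflow_sched_next(&it))) {
		/* examine or schedule each subflow here */
	}

	/* The verifier requires _destroy() once _new() has run, even if it failed. */
	bpf_iter_mptcp_subflow_sched_destroy(&it);
	return 0;
}
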
@@ -17,11 +17,12 @@ void BPF_PROG(mptcp_sched_bkup_release, struct mptcp_sock *msk)
}

SEC("struct_ops")
-int BPF_PROG(bpf_bkup_get_send, struct mptcp_sock *msk)
+int BPF_PROG(bpf_bkup_get_send, struct mptcp_sock *msk,
+	     struct mptcp_sched_data *data)
{
	struct mptcp_subflow_context *subflow;

-	bpf_for_each(mptcp_subflow, subflow, (struct sock *)msk) {
+	bpf_for_each(mptcp_subflow_sched, subflow, (struct sock *)msk, data) {
		if (!BPF_CORE_READ_BITFIELD_PROBED(subflow, backup) ||
		    !BPF_CORE_READ_BITFIELD_PROBED(subflow, request_bkup)) {
			mptcp_subflow_set_scheduled(subflow, true);
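
For context, the remainder of the program (cut off by the hunk) presumably
breaks out of the loop after marking a subflow and returns 0, and the scheduler
is registered through a struct_ops map. A sketch of that registration, where
the mptcp_sched_ops hook names (init/release/get_send) and the
mptcp_sched_bkup_init callback are assumed from the surrounding selftest and
are not shown in this diff:

SEC(".struct_ops")
struct mptcp_sched_ops bkup = {
	.init		= (void *)mptcp_sched_bkup_init,	/* assumed counterpart of _release */
	.release	= (void *)mptcp_sched_bkup_release,
	.get_send	= (void *)bpf_bkup_get_send,
	.name		= "bpf_bkup",
};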