
[mptcp-next,3/6] bpf: Add mptcp_address bpf_iter

Message ID ba6be35e277528249653ea525a158102f401d406.1729248083.git.tanggeliang@kylinos.cn (mailing list archive)
State Superseded, archived
Series: add mptcp_address bpf_iter

Checks

Context Check Description
matttbe/checkpatch warning total: 0 errors, 3 warnings, 0 checks, 61 lines checked
matttbe/shellcheck success MPTCP selftests files have not been modified
matttbe/build success Build and static analysis OK
matttbe/KVM_Validation__normal success Success! ✅
matttbe/KVM_Validation__debug success Success! ✅
matttbe/KVM_Validation__btf-normal__only_bpftest_all_ success Success! ✅
matttbe/KVM_Validation__btf-debug__only_bpftest_all_ fail Critical: 2 Call Trace(s) - Critical: Global Timeout ❌

Commit Message

Geliang Tang Oct. 18, 2024, 10:51 a.m. UTC
From: Geliang Tang <tanggeliang@kylinos.cn>

Just like the mptcp_subflow bpf_iter used to implement the MPTCP BPF
packet scheduler, another bpf_iter, named mptcp_address, is needed to
traverse all address entries on the userspace_pm_local_addr_list of an
MPTCP socket, in order to implement the MPTCP BPF path manager.

In kernel space, we walk this list like this:

list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list)
	kfunc(entry);

With the mptcp_address bpf_iter, bpf_for_each() can be used to do the
same thing in a BPF program:

	bpf_for_each(mptcp_address, entry, msk)
		kfunc(entry);

This bpf_iter must be invoked while holding the msk pm lock, so
spin_is_locked() is used to check that the lock is held.
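
For illustration, here is a minimal sketch of a BPF program using the new
iterator. The struct_ops hook name (mptcp_pm_example) is hypothetical, and
the calling context is assumed to be one where the kernel caller already
holds msk->pm.lock; neither is defined by this patch:

	/* Sketch only: hook name and calling context are assumptions. */
	SEC("struct_ops")
	int BPF_PROG(mptcp_pm_example, struct mptcp_sock *msk)
	{
		struct mptcp_pm_addr_entry *entry;
		int count = 0;

		/* msk->pm.lock is assumed to be held by the kernel
		 * caller, as required by bpf_iter_mptcp_address_new().
		 */
		bpf_for_each(mptcp_address, entry, msk)
			count++;

		return count;
	}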

Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
---
 net/mptcp/bpf.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

Patch

diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index 1ad7f703abb2..102d4d63f390 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -214,6 +214,15 @@  struct bpf_iter_mptcp_subflow_kern {
 	struct list_head *pos;
 } __attribute__((aligned(8)));
 
+struct bpf_iter_mptcp_address {
+	__u64 __opaque[2];
+} __attribute__((aligned(8)));
+
+struct bpf_iter_mptcp_address_kern {
+	struct mptcp_sock *msk;
+	struct list_head *pos;
+} __attribute__((aligned(8)));
+
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc static struct mptcp_sock *bpf_mptcp_sk(struct sock *sk)
@@ -264,6 +273,37 @@  __bpf_kfunc static void bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_sub
 {
 }
 
+__bpf_kfunc static int bpf_iter_mptcp_address_new(struct bpf_iter_mptcp_address *it,
+						  struct mptcp_sock *msk)
+{
+	struct bpf_iter_mptcp_address_kern *kit = (void *)it;
+
+	kit->msk = msk;
+	if (!msk)
+		return -EINVAL;
+
+	WARN_ON_ONCE(!spin_is_locked(&msk->pm.lock));
+
+	kit->pos = &msk->pm.userspace_pm_local_addr_list;
+	return 0;
+}
+
+__bpf_kfunc static struct mptcp_pm_addr_entry *
+bpf_iter_mptcp_address_next(struct bpf_iter_mptcp_address *it)
+{
+	struct bpf_iter_mptcp_address_kern *kit = (void *)it;
+
+	if (!kit->msk || list_is_last(kit->pos, &kit->msk->pm.userspace_pm_local_addr_list))
+		return NULL;
+
+	kit->pos = kit->pos->next;
+	return list_entry(kit->pos, struct mptcp_pm_addr_entry, list);
+}
+
+__bpf_kfunc static void bpf_iter_mptcp_address_destroy(struct bpf_iter_mptcp_address *it)
+{
+}
+
 __bpf_kfunc static struct mptcp_sock *bpf_mptcp_sock_acquire(struct mptcp_sock *msk)
 {
 	struct sock *sk = (struct sock *)msk;
@@ -302,6 +342,9 @@  BTF_ID_FLAGS(func, bpf_mptcp_subflow_tcp_sock)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_acquire, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_release, KF_RELEASE)
 BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)
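
For reference, bpf_for_each() (a macro from the BPF selftests'
bpf_experimental.h) open-codes calls to the three kfuncs registered above.
Roughly, bpf_for_each(mptcp_address, entry, msk) expands to the following
pattern (simplified sketch, with the return value of _new() left unchecked):

	struct bpf_iter_mptcp_address it;
	struct mptcp_pm_addr_entry *entry;

	bpf_iter_mptcp_address_new(&it, msk);
	while ((entry = bpf_iter_mptcp_address_next(&it))) {
		/* loop body runs once per address entry */
	}
	bpf_iter_mptcp_address_destroy(&it);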