
[RFC,bpf-next,06/13] bpf: Helpers to alloc and free object id in bpf namespace

Message ID 20230326092208.13613-7-laoar.shao@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: Introduce BPF namespace

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 fail Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 pending Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 fail Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 fail Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 fail Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 fail Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 fail Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs on s390x with gcc
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 22 this patch: 25
netdev/cc_maintainers warning 2 maintainers not CCed: martin.lau@linux.dev song@kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 20 this patch: 23
netdev/checkpatch warning CHECK: Alignment should match open parenthesis CHECK: spinlock_t definition without comment WARNING: line length of 84 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 93 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yafang Shao March 26, 2023, 9:22 a.m. UTC
Introduce generic helpers to allocate and free the object ids of
bpf_{map,prog,link} objects in a bpf namespace. An id is allocated at
every level of the namespace hierarchy, from the object's owning
namespace up to the init bpf namespace, so the same object is referred
to by a different id in each namespace. Accessors are also added to
retrieve the id as seen from the init namespace (bpf_obj_id_nr()), from
the bpf namespace of current (bpf_obj_id_vnr()), or from a given
namespace (bpf_obj_id_nr_ns()). The map, prog and link idr locks are
exported so that the new helpers can reuse them.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/bpf_namespace.h | 36 ++++++++++++++++++
 kernel/bpf/bpf_namespace.c    | 86 +++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/syscall.c          |  6 +--
 3 files changed, 125 insertions(+), 3 deletions(-)
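
For context, here is a minimal sketch of how a caller in kernel/bpf/syscall.c might use the new helpers. The map->obj_id pointer and these exact call sites are assumptions for illustration only (the actual wiring lands in later patches of the series); the sketch relies on the headers syscall.c already includes plus linux/bpf_namespace.h.

/* Hypothetical caller: allocate the per-namespace ids for a new map. */
static int bpf_map_alloc_id(struct bpf_map *map)
{
	struct bpf_namespace *ns = current->nsproxy->bpf_ns;
	struct bpf_obj_id *obj_id;

	obj_id = bpf_alloc_obj_id(ns, map, MAP_OBJ_ID);
	if (IS_ERR(obj_id))
		return PTR_ERR(obj_id);
	map->obj_id = obj_id;	/* assumed field, not part of this patch */
	return 0;
}

/* Hypothetical caller: drop the ids at every namespace level at once. */
static void bpf_map_free_id(struct bpf_map *map)
{
	if (!map->obj_id)
		return;
	bpf_free_obj_id(map->obj_id, MAP_OBJ_ID);
	map->obj_id = NULL;
}

Since bpf_alloc_obj_id() returns an ERR_PTR() on failure, such a caller can propagate the error directly.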

Patch

diff --git a/include/linux/bpf_namespace.h b/include/linux/bpf_namespace.h
index 06aa51f..50bd68c 100644
--- a/include/linux/bpf_namespace.h
+++ b/include/linux/bpf_namespace.h
@@ -38,9 +38,45 @@  struct bpf_namespace {
 
 extern struct bpf_namespace init_bpf_ns;
 extern struct proc_ns_operations bpfns_operations;
+extern spinlock_t map_idr_lock;
+extern spinlock_t prog_idr_lock;
+extern spinlock_t link_idr_lock;
 
 struct bpf_namespace *copy_bpfns(unsigned long flags,
 								struct user_namespace *user_ns,
 								struct bpf_namespace *old_ns);
 void put_bpfns(struct bpf_namespace *ns);
+struct bpf_obj_id *bpf_alloc_obj_id(struct bpf_namespace *ns,
+				    void *obj, int type);
+void bpf_free_obj_id(struct bpf_obj_id *obj_id, int type);
+
+/*
+ * Helpers to get a bpf_obj_id's id as seen from different namespaces
+ *
+ * bpf_obj_id_nr()    : global id, i.e. the id seen from the init namespace;
+ * bpf_obj_id_vnr()   : virtual id, i.e. the id seen from the bpf namespace
+ *                      of current.
+ * bpf_obj_id_nr_ns() : id seen from the ns specified.
+ *
+ * see also task_xid_nr() etc in include/linux/sched.h
+ */
+static inline int bpf_obj_id_nr(struct bpf_obj_id *obj_id)
+{
+	if (obj_id)
+		return obj_id->numbers[0].nr;
+	return 0;
+}
+
+static inline int bpf_obj_id_nr_ns(struct bpf_obj_id *obj_id,
+				   struct bpf_namespace *ns)
+{
+	if (obj_id && ns->level <= obj_id->level)
+		return obj_id->numbers[ns->level].nr;
+	return 0;
+}
+
+static inline int bpf_obj_id_vnr(struct bpf_obj_id *obj_id)
+{
+	return bpf_obj_id_nr_ns(obj_id, current->nsproxy->bpf_ns);
+}
 #endif /* _LINUX_BPF_ID_NS_H */
diff --git a/kernel/bpf/bpf_namespace.c b/kernel/bpf/bpf_namespace.c
index 88a86cd..1e98d1d 100644
--- a/kernel/bpf/bpf_namespace.c
+++ b/kernel/bpf/bpf_namespace.c
@@ -217,3 +217,89 @@  static __init int bpf_namespaces_init(void)
 }
 
 late_initcall(bpf_namespaces_init);
+
+struct bpf_obj_id *bpf_alloc_obj_id(struct bpf_namespace *ns,
+				    void *obj, int type)
+{
+	struct bpf_namespace *tmp = ns;
+	struct bpf_obj_id *obj_id;
+	spinlock_t *idr_lock;
+	unsigned long flags;
+	int id;
+	int i;
+
+	switch (type) {
+	case MAP_OBJ_ID:
+		idr_lock = &map_idr_lock;
+		break;
+	case PROG_OBJ_ID:
+		idr_lock = &prog_idr_lock;
+		break;
+	case LINK_OBJ_ID:
+		idr_lock = &link_idr_lock;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	obj_id = kmem_cache_alloc(ns->obj_id_cachep, GFP_KERNEL);
+	if (!obj_id)
+		return ERR_PTR(-ENOMEM);
+
+	obj_id->level = ns->level;
+	for (i = ns->level; i >= 0; i--) {
+		idr_preload(GFP_KERNEL);
+		spin_lock_bh(idr_lock);
+		id = idr_alloc_cyclic(&tmp->idr[type], obj, 1, INT_MAX, GFP_ATOMIC);
+		spin_unlock_bh(idr_lock);
+		idr_preload_end();
+		if (id < 0)
+			goto out_free;
+		obj_id->numbers[i].nr = id;
+		obj_id->numbers[i].ns = tmp;
+		tmp = tmp->parent;
+	}
+
+	return obj_id;
+
+out_free:
+	for (i++; i <= ns->level; i++) {
+		tmp = obj_id->numbers[i].ns;
+		spin_lock_irqsave(idr_lock, flags);
+		idr_remove(&tmp->idr[type], obj_id->numbers[i].nr);
+		spin_unlock_irqrestore(idr_lock, flags);
+	}
+	kmem_cache_free(ns->obj_id_cachep, obj_id);
+	return ERR_PTR(id);
+}
+
+void bpf_free_obj_id(struct bpf_obj_id *obj_id, int type)
+{
+	struct bpf_namespace *ns;
+	spinlock_t *idr_lock;
+	unsigned long flags;
+	int i;
+
+	switch (type) {
+	case MAP_OBJ_ID:
+		idr_lock = &map_idr_lock;
+		break;
+	case PROG_OBJ_ID:
+		idr_lock = &prog_idr_lock;
+		break;
+	case LINK_OBJ_ID:
+		idr_lock = &link_idr_lock;
+		break;
+	default:
+		return;
+	}
+	/* Note that level 0 must be freed last */
+	for (i = obj_id->level; i >= 0; i--) {
+		spin_lock_irqsave(idr_lock, flags);
+		ns = obj_id->numbers[i].ns;
+		idr_remove(&ns->idr[type], obj_id->numbers[i].nr);
+		spin_unlock_irqrestore(idr_lock, flags);
+	}
+	ns = obj_id->numbers[obj_id->level].ns;
+	kmem_cache_free(ns->obj_id_cachep, obj_id);
+}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ee1297d..f24e550 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -48,11 +48,11 @@ 
 
 DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
-static DEFINE_SPINLOCK(prog_idr_lock);
+DEFINE_SPINLOCK(prog_idr_lock);
 static DEFINE_IDR(map_idr);
-static DEFINE_SPINLOCK(map_idr_lock);
+DEFINE_SPINLOCK(map_idr_lock);
 static DEFINE_IDR(link_idr);
-static DEFINE_SPINLOCK(link_idr_lock);
+DEFINE_SPINLOCK(link_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly =
 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
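
As a usage note on the new id accessors (again an assumption about how later patches in the series might consume them, not something this patch introduces): an id reported to user space would normally come from bpf_obj_id_vnr(), so a task only sees the id valid in its own bpf namespace, while bpf_obj_id_nr() keeps the global view of the init namespace. A minimal sketch, reusing the hypothetical map->obj_id field from above:

/* id of the map as seen from an arbitrary namespace; 0 means the map
 * is not visible from ns (ns->level > obj_id->level).
 */
static int bpf_map_id_in_ns(const struct bpf_map *map,
			    struct bpf_namespace *ns)
{
	return bpf_obj_id_nr_ns(map->obj_id, ns);
}

/* id of the map as seen from the bpf namespace of current */
static int bpf_map_id_for_current(const struct bpf_map *map)
{
	return bpf_obj_id_vnr(map->obj_id);
}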