
[RFC,bpf-next,05/13] bpf: Make bpf objects id have the same alloc and free pattern

Message ID 20230326092208.13613-6-laoar.shao@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: Introduce BPF namespace

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 fail Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 pending Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 fail Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 fail Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 fail Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 fail Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 fail Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs on s390x with gcc
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 20 this patch: 20
netdev/cc_maintainers warning 2 maintainers not CCed: martin.lau@linux.dev song@kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 20 this patch: 20
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 163 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yafang Shao March 26, 2023, 9:22 a.m. UTC
Make the BPF object IDs follow the same alloc and free pattern, so that
we can use generic helpers instead.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 kernel/bpf/offload.c | 15 +++++++++++--
 kernel/bpf/syscall.c | 62 ++++++++++++++++++++++++----------------------------
 2 files changed, 41 insertions(+), 36 deletions(-)
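
For reference, below is a minimal sketch of the alloc/free pattern this
patch converges on, shown for the map case (prog and link IDs follow the
same shape). The example_* wrapper functions are illustrative only and
are not part of the patch; the generic helpers mentioned in the commit
message are expected later in the series and are not shown here.

/* Sketch (not part of the patch): the common id alloc/free pattern
 * after this change, map case shown. The *_alloc_id() helpers now
 * only return the id; the caller stores it, and the caller checks
 * and clears the id around *_free_id().
 */
static int example_map_publish_id(struct bpf_map *map)
{
	int id;

	id = bpf_map_alloc_id(map);	/* > 0 on success, < 0 on error */
	if (id < 0)
		return id;
	map->id = id;
	return 0;
}

static void example_map_unpublish_id(struct bpf_map *map)
{
	if (map->id) {			/* skip objects never published */
		bpf_map_free_id(map);
		map->id = 0;
	}
}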

Patch

diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index d9c9f45..aec70e0 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -134,9 +134,20 @@  static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
 
 static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
 {
+	struct bpf_map *map = &offmap->map;
+
 	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
-	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
-	bpf_map_free_id(&offmap->map);
+	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map.
+	 *
+	 * Offloaded maps are removed from the IDR store when their device
+	 * disappears - even if someone holds an fd to them they are unusable,
+	 * the memory is gone, all ops will fail; they are simply waiting for
+	 * refcnt to drop to be freed.
+	 */
+	if (map->id) {
+		bpf_map_free_id(map);
+		map->id = 0;
+	}
 	list_del_init(&offmap->offloads);
 	offmap->netdev = NULL;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f3664f2..ee1297d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -382,30 +382,19 @@  static int bpf_map_alloc_id(struct bpf_map *map)
 	idr_preload(GFP_KERNEL);
 	spin_lock_bh(&map_idr_lock);
 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
-	if (id > 0)
-		map->id = id;
 	spin_unlock_bh(&map_idr_lock);
 	idr_preload_end();
 
-	return id > 0 ? 0 : id;
+	return id;
 }
 
 void bpf_map_free_id(struct bpf_map *map)
 {
 	unsigned long flags;
 
-	/* Offloaded maps are removed from the IDR store when their device
-	 * disappears - even if someone holds an fd to them they are unusable,
-	 * the memory is gone, all ops will fail; they are simply waiting for
-	 * refcnt to drop to be freed.
-	 */
-	if (!map->id)
-		return;
-
 	spin_lock_irqsave(&map_idr_lock, flags);
 
 	idr_remove(&map_idr, map->id);
-	map->id = 0;
 
 	spin_unlock_irqrestore(&map_idr_lock, flags);
 }
@@ -748,8 +737,11 @@  static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic64_dec_and_test(&map->refcnt)) {
-		/* bpf_map_free_id() must be called first */
-		bpf_map_free_id(map);
+		/* bpf_map_free_id() must be called first. */
+		if (map->id) {
+			bpf_map_free_id(map);
+			map->id = 0;
+		}
 		btf_put(map->btf);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		/* Avoid spawning kworkers, since they all might contend
@@ -1215,8 +1207,9 @@  static int map_create(union bpf_attr *attr)
 		goto free_map_field_offs;
 
 	err = bpf_map_alloc_id(map);
-	if (err)
+	if (err < 0)
 		goto free_map_sec;
+	map->id = err;
 
 	bpf_map_save_memcg(map);
 
@@ -2024,29 +2017,18 @@  static int bpf_prog_alloc_id(struct bpf_prog *prog)
 	idr_preload(GFP_KERNEL);
 	spin_lock_bh(&prog_idr_lock);
 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
-	if (id > 0)
-		prog->aux->id = id;
 	spin_unlock_bh(&prog_idr_lock);
 	idr_preload_end();
 
-	return id > 0 ? 0 : id;
+	return id;
 }
 
 void bpf_prog_free_id(struct bpf_prog *prog)
 {
 	unsigned long flags;
 
-	/* cBPF to eBPF migrations are currently not in the idr store.
-	 * Offloaded programs are removed from the store when their device
-	 * disappears - even if someone grabs an fd to them they are unusable,
-	 * simply waiting for refcnt to drop to be freed.
-	 */
-	if (!prog->aux->id)
-		return;
-
 	spin_lock_irqsave(&prog_idr_lock, flags);
 	idr_remove(&prog_idr, prog->aux->id);
-	prog->aux->id = 0;
 	spin_unlock_irqrestore(&prog_idr_lock, flags);
 }
 
@@ -2091,7 +2073,15 @@  static void bpf_prog_put_deferred(struct work_struct *work)
 	prog = aux->prog;
 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
-	bpf_prog_free_id(prog);
+	/* cBPF to eBPF migrations are currently not in the idr store.
+	 * Offloaded programs are removed from the store when their device
+	 * disappears - even if someone grabs an fd to them they are unusable,
+	 * simply waiting for refcnt to drop to be freed.
+	 */
+	if (prog->aux->id) {
+		bpf_prog_free_id(prog);
+		prog->aux->id = 0;
+	}
 	__bpf_prog_put_noref(prog, true);
 }
 
@@ -2655,8 +2645,9 @@  static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
 		goto free_used_maps;
 
 	err = bpf_prog_alloc_id(prog);
-	if (err)
+	if (err < 0)
 		goto free_used_maps;
+	prog->aux->id = err;
 
 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
 	 * effectively publicly exposed. However, retrieving via
@@ -2730,9 +2721,6 @@  void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
 
 static void bpf_link_free_id(int id)
 {
-	if (!id)
-		return;
-
 	spin_lock_bh(&link_idr_lock);
 	idr_remove(&link_idr, id);
 	spin_unlock_bh(&link_idr_lock);
@@ -2748,7 +2736,10 @@  static void bpf_link_free_id(int id)
 void bpf_link_cleanup(struct bpf_link_primer *primer)
 {
 	primer->link->prog = NULL;
-	bpf_link_free_id(primer->id);
+	if (primer->id) {
+		bpf_link_free_id(primer->id);
+		primer->id = 0;
+	}
 	fput(primer->file);
 	put_unused_fd(primer->fd);
 }
@@ -2761,7 +2752,10 @@  void bpf_link_inc(struct bpf_link *link)
 /* bpf_link_free is guaranteed to be called from process context */
 static void bpf_link_free(struct bpf_link *link)
 {
-	bpf_link_free_id(link->id);
+	if (link->id) {
+		bpf_link_free_id(link->id);
+		link->id = 0;
+	}
 	if (link->prog) {
 		/* detach BPF program, clean up used resources */
 		link->ops->release(link);