Message ID | 20230131180016.3368305-3-davemarchevsky@fb.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | BPF rbtree next-gen datastructure
On 1/31/23 1:00 PM, Dave Marchevsky wrote:
> This patch eliminates extra bpf_reg_state memory usage added due to
> previous patch keeping a copy of lock identity in reg state for
> non-owning refs.
>
> Instead of copying lock identity around, this patch changes
> non_owning_ref_lock field to be a bool, taking advantage of the
> following:
>
>   * There can currently only be one active lock at a time
>   * non-owning refs are only valid in the critical section
>
> So if a verifier_state has an active_lock, any non-owning ref must've
> been obtained under that lock, and any non-owning ref not obtained under
> that lock must have been invalidated previously. Therefore if a
> non-owning ref is associated with a lock, it's the active_lock of the
> current state. So we can keep a bool "are we associated with active_lock
> of current state" instead of copying lock identity around.
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>  include/linux/bpf_verifier.h |  2 +-
>  kernel/bpf/verifier.c        | 20 ++++++++++----------
>  2 files changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index 1c6bbde40705..baeb5afb0b81 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -84,7 +84,7 @@ struct bpf_reg_state {
>  		struct {
>  			struct btf *btf;
>  			u32 btf_id;
> -			struct bpf_active_lock non_owning_ref_lock;
> +			bool non_owning_ref_lock;
>  		};
>
>  		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

Pahole output after this change:

	union {
		int                  range;                    /*     8     4 */
		struct {
			struct bpf_map *     map_ptr;          /*     8     8 */
			u32                  map_uid;          /*    16     4 */
		};                                             /*     8    16 */
		struct {
			struct btf *         btf;              /*     8     8 */
			u32                  btf_id;           /*    16     4 */
			bool                 non_owning_ref_lock; /*    20     1 */
		};                                             /*     8    16 */
		u32                  mem_size;                 /*     8     4 */
		struct {
			enum bpf_dynptr_type type;             /*     8     4 */
			bool                 first_slot;       /*    12     1 */
		} dynptr;                                      /*     8     8 */
		struct {
			long unsigned int    raw1;             /*     8     8 */
			long unsigned int    raw2;             /*    16     8 */
		} raw;                                         /*     8    16 */
		u32                  subprogno;                /*     8     4 */
	};                                                     /*     8    16 */

The PTR_TO_BTF_ID union entry was taking 24 bytes after the previous commit; now it's down to 16, back to the same size as before that commit.
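For reference, a dump like the one above can be regenerated with pahole's `-C`/`--class_name` option against any object built with debug info; the object path below is just an example, not taken from the thread:

```
$ pahole -C bpf_reg_state kernel/bpf/verifier.o
```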
On Tue, Jan 31, 2023 at 10:00 AM Dave Marchevsky <davemarchevsky@fb.com> wrote:
>  static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
>  				       struct bpf_active_lock *lock)
>  {
> +	struct bpf_active_lock *cur_state_lock;
>  	struct bpf_func_state *unused;
>  	struct bpf_reg_state *reg;
>
> +	cur_state_lock = &env->cur_state->active_lock;
>  	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
> -		if (reg->non_owning_ref_lock.ptr &&
> -		    reg->non_owning_ref_lock.ptr == lock->ptr &&
> -		    reg->non_owning_ref_lock.id == lock->id)
> +		if (reg->non_owning_ref_lock &&
> +		    cur_state_lock->ptr == lock->ptr &&
> +		    cur_state_lock->id == lock->id)

invalidate_non_owning_refs() is called with &cur_state's active_lock, so the last two checks are redundant, but I suspect they hide the issue with the first check. Checking just reg->non_owning_ref_lock is ambiguous, since that bool lives in a union whose storage is reused by other register types. It needs base_type(reg->type) == PTR_TO_BTF_ID first.
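A minimal sketch of the guard this review suggests, folded into the loop above (an assumption about the shape of the fix, not necessarily what landed upstream):

```c
/* Sketch: only trust the non_owning_ref_lock bool once we know the
 * PTR_TO_BTF_ID member of the union is the live one for this register.
 * For other register types the same bytes belong to other union members
 * (map state, mem_size, dynptr, raw, ...), so the flag could read as
 * garbage.
 */
bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
	if (base_type(reg->type) == PTR_TO_BTF_ID &&
	    reg->non_owning_ref_lock)
		__mark_reg_unknown(env, reg);
}));
```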
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1c6bbde40705..baeb5afb0b81 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -84,7 +84,7 @@ struct bpf_reg_state {
 		struct {
 			struct btf *btf;
 			u32 btf_id;
-			struct bpf_active_lock non_owning_ref_lock;
+			bool non_owning_ref_lock;
 		};
 
 		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4b1883ffcf21..ed816e824928 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -935,9 +935,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 				verbose_a("id=%d", reg->id);
 			if (reg->ref_obj_id)
 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
-			if (reg->non_owning_ref_lock.ptr)
-				verbose_a("non_own_id=(%p,%d)", reg->non_owning_ref_lock.ptr,
-					  reg->non_owning_ref_lock.id);
+			if (reg->non_owning_ref_lock)
+				verbose(env, "non_own_ref,");
 			if (t != SCALAR_VALUE)
 				verbose_a("off=%d", reg->off);
 			if (type_is_pkt_pointer(t))
@@ -4834,7 +4833,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	}
 
 	if (type_is_alloc(reg->type) && !reg->ref_obj_id &&
-	    !reg->non_owning_ref_lock.ptr) {
+	    !reg->non_owning_ref_lock) {
 		verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
 		return -EFAULT;
 	}
@@ -7085,13 +7084,15 @@ static int release_reference(struct bpf_verifier_env *env,
 static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
 				       struct bpf_active_lock *lock)
 {
+	struct bpf_active_lock *cur_state_lock;
 	struct bpf_func_state *unused;
 	struct bpf_reg_state *reg;
 
+	cur_state_lock = &env->cur_state->active_lock;
 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
-		if (reg->non_owning_ref_lock.ptr &&
-		    reg->non_owning_ref_lock.ptr == lock->ptr &&
-		    reg->non_owning_ref_lock.id == lock->id)
+		if (reg->non_owning_ref_lock &&
+		    cur_state_lock->ptr == lock->ptr &&
+		    cur_state_lock->id == lock->id)
 			__mark_reg_unknown(env, reg);
 	}));
 }
@@ -8718,13 +8719,12 @@ static int ref_set_non_owning_lock(struct bpf_verifier_env *env, struct bpf_reg_
 		return -EFAULT;
 	}
 
-	if (reg->non_owning_ref_lock.ptr) {
+	if (reg->non_owning_ref_lock) {
 		verbose(env, "verifier internal error: non_owning_ref_lock already set\n");
 		return -EFAULT;
 	}
 
-	reg->non_owning_ref_lock.id = state->active_lock.id;
-	reg->non_owning_ref_lock.ptr = state->active_lock.ptr;
+	reg->non_owning_ref_lock = true;
 
 	return 0;
 }
This patch eliminates extra bpf_reg_state memory usage added due to previous patch keeping a copy of lock identity in reg state for non-owning refs.

Instead of copying lock identity around, this patch changes non_owning_ref_lock field to be a bool, taking advantage of the following:

* There can currently only be one active lock at a time
* non-owning refs are only valid in the critical section

So if a verifier_state has an active_lock, any non-owning ref must've been obtained under that lock, and any non-owning ref not obtained under that lock must have been invalidated previously. Therefore if a non-owning ref is associated with a lock, it's the active_lock of the current state. So we can keep a bool "are we associated with active_lock of current state" instead of copying lock identity around.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/linux/bpf_verifier.h |  2 +-
 kernel/bpf/verifier.c        | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)
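For readers outside the thread, the two bullet points above are easiest to see from the BPF program side. The fragment below is a sketch in the style of this series' selftests, not code from this patch; node_data, glock, groot, less(), and add_and_peek() are illustrative names, while bpf_obj_new() and the bpf_rbtree_*() kfuncs are the API this series adds:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
#endif

struct node_data {
	long key;
	struct bpf_rb_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a, *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	return node_a->key < node_b->key;
}

SEC("tc")
long add_and_peek(void *ctx)
{
	struct bpf_rb_node *m;
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));	/* n is an owning reference */
	if (!n)
		return 0;

	bpf_spin_lock(&glock);		/* verifier: active_lock is set */
	bpf_rbtree_add(&groot, &n->node, less);	/* n becomes non-owning */
	m = bpf_rbtree_first(&groot);	/* m is non-owning as well */
	bpf_spin_unlock(&glock);	/* every non-owning ref invalidated */

	/* Neither n nor m may be touched past the unlock. Since at most one
	 * lock can be active and non-owning refs die with it, "tied to the
	 * current active_lock?" is a yes/no question -- hence the bool.
	 */
	return 0;
}

char _license[] SEC("license") = "GPL";
```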