@@ -323,7 +323,8 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- u32 active_spin_lock;
+ void *active_spin_lock_ptr;
+ u32 active_spin_lock_id;
bool speculative;

/* first and last insn idx of this verifier state */
@@ -1201,7 +1201,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
}
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
- dst_state->active_spin_lock = src->active_spin_lock;
+ dst_state->active_spin_lock_ptr = src->active_spin_lock_ptr;
+ dst_state->active_spin_lock_id = src->active_spin_lock_id;
dst_state->branches = src->branches;
dst_state->parent = src->parent;
dst_state->first_insn_idx = src->first_insn_idx;
@@ -5470,22 +5471,35 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
return -EINVAL;
}
if (is_lock) {
- if (cur->active_spin_lock) {
+ if (cur->active_spin_lock_ptr) {
verbose(env,
"Locking two bpf_spin_locks are not allowed\n");
return -EINVAL;
}
- cur->active_spin_lock = reg->id;
+ if (map)
+ cur->active_spin_lock_ptr = map;
+ else
+ cur->active_spin_lock_ptr = btf;
+ cur->active_spin_lock_id = reg->id;
} else {
- if (!cur->active_spin_lock) {
+ void *ptr;
+
+ if (map)
+ ptr = map;
+ else
+ ptr = btf;
+
+ if (!cur->active_spin_lock_ptr) {
verbose(env, "bpf_spin_unlock without taking a lock\n");
return -EINVAL;
}
- if (cur->active_spin_lock != reg->id) {
+ if (cur->active_spin_lock_ptr != ptr ||
+ cur->active_spin_lock_id != reg->id) {
verbose(env, "bpf_spin_unlock of different lock\n");
return -EINVAL;
}
- cur->active_spin_lock = 0;
+ cur->active_spin_lock_ptr = NULL;
+ cur->active_spin_lock_id = 0;
}
return 0;
}
@@ -10392,8 +10406,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
dst_reg->type = PTR_TO_MAP_VALUE;
dst_reg->off = aux->map_off;
- if (btf_record_has_field(map->record, BPF_SPIN_LOCK))
- dst_reg->id = ++env->id_gen;
+ WARN_ON_ONCE(map->max_entries != 1);
+ /* We want reg->id to be same (0) as map_value is not distinct */
} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
insn->src_reg == BPF_PSEUDO_MAP_IDX) {
dst_reg->type = CONST_PTR_TO_MAP;
@@ -10471,7 +10485,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}

- if (env->cur_state->active_spin_lock) {
+ if (env->cur_state->active_spin_lock_ptr) {
verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
return -EINVAL;
}
@@ -11737,7 +11751,8 @@ static bool states_equal(struct bpf_verifier_env *env,
if (old->speculative && !cur->speculative)
return false;

- if (old->active_spin_lock != cur->active_spin_lock)
+ if (old->active_spin_lock_ptr != cur->active_spin_lock_ptr ||
+ old->active_spin_lock_id != cur->active_spin_lock_id)
return false;

/* for states to be equal callsites have to be the same
@@ -12376,7 +12391,7 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}

- if (env->cur_state->active_spin_lock &&
+ if (env->cur_state->active_spin_lock_ptr &&
(insn->src_reg == BPF_PSEUDO_CALL ||
insn->imm != BPF_FUNC_spin_unlock)) {
verbose(env, "function calls are not allowed while holding a lock\n");
@@ -12413,7 +12428,7 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}

- if (env->cur_state->active_spin_lock) {
+ if (env->cur_state->active_spin_lock_ptr) {
verbose(env, "bpf_spin_unlock is missing\n");
return -EINVAL;
}
Global variables reside in maps accessible using direct_value_addr callbacks, so giving each such load instruction's rewrite a unique reg->id prevents us from holding locks that are global.

This is not great, so refactor active_spin_lock into two separate fields, active_spin_lock_ptr and active_spin_lock_id, which is generic enough to allow it for global variables, map lookups, and local kptr registers at the same time.

Held vs. non-held is indicated by active_spin_lock_ptr, which stores the reg->map_ptr or reg->btf pointer of the register used for locking the spin lock. The active_spin_lock_id must also be compared to ensure that bpf_spin_unlock is called for the same register as bpf_spin_lock. Next, pseudo load instructions are no longer given a unique reg->id, as they all look up the same map value (max_entries is never greater than 1).

Essentially, we consider the tuple (active_spin_lock_ptr, active_spin_lock_id) to always be unique for any kind of argument to bpf_spin_{lock,unlock}.

Note that this can be extended in the future to also remember the offset used for locking, so that multiple bpf_spin_lock fields can be introduced in the same allocation.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/bpf_verifier.h |  3 ++-
 kernel/bpf/verifier.c        | 39 +++++++++++++++++++++++++-----------
 2 files changed, 29 insertions(+), 13 deletions(-)
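For illustration only (not part of this patch; the section name, program name, and program type below are arbitrary), a minimal sketch of the kind of program this change lets the verifier accept: taking a bpf_spin_lock that lives in a global variable. The global is backed by a single-entry array map reached through direct_value_addr pseudo loads, so the held lock is now tracked via the (map pointer, reg->id) tuple. The __hidden annotation follows the convention used by the BPF selftests for global locks; the assumption here is that it keeps libbpf from mmap'ing the backing map, which maps containing a bpf_spin_lock do not support.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Global lock in its own datasec; __hidden so libbpf does not need to
 * mmap the backing array map (sketch follows the selftests' pattern).
 */
struct bpf_spin_lock glock SEC(".data.A") __hidden;

__u64 counter;

SEC("tc")
int tc_prog(struct __sk_buff *skb)
{
	/* Lock and unlock use the same global, so the verifier sees a
	 * matching (active_spin_lock_ptr, active_spin_lock_id) pair.
	 */
	bpf_spin_lock(&glock);
	counter++;
	bpf_spin_unlock(&glock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";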