@@ -180,6 +180,7 @@ enum btf_field_type {
BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
BPF_LIST_HEAD = (1 << 4),
BPF_LIST_NODE = (1 << 5),
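+	/* Matches any graph node or graph root field */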
+ BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD,
};
struct btf_field_kptr {
@@ -43,6 +43,22 @@ enum bpf_reg_liveness {
REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
+/* For every reg representing a map value or allocated object pointer,
+ * we consider the tuple of (ptr, id) to be unique in verifier
+ * context and consider them not to alias each other for the purposes
+ * of tracking lock state.
+ */
+struct bpf_active_lock {
+ /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+ * there's no active lock held, and other fields have no
+ * meaning. If non-NULL, it indicates that a lock is held and
+ * the id member has the reg->id of the register, which can be >= 0.
+ */
+ void *ptr;
+ /* This will be reg->id */
+ u32 id;
+};
+
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
@@ -68,6 +84,7 @@ struct bpf_reg_state {
struct {
struct btf *btf;
u32 btf_id;
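+			/* Identity of the lock a non-owning ref is tied to */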
+ struct bpf_active_lock non_owning_ref_lock;
};
struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
@@ -226,11 +243,6 @@ struct bpf_reference_state {
* exiting a callback function.
*/
int callback_ref;
- /* Mark the reference state to release the registers sharing the same id
- * on bpf_spin_unlock (for nodes that we will lose ownership to but are
- * safe to access inside the critical section).
- */
- bool release_on_unlock;
};
/* state of the program:
@@ -331,21 +343,8 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- /* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and conside them to not alias each other for the purposes of
- * tracking lock state.
- */
- struct {
- /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
- * there's no active lock held, and other fields have no
- * meaning. If non-NULL, it indicates that a lock is held and
- * id member has the reg->id of the register which can be >= 0.
- */
- void *ptr;
- /* This will be reg->id */
- u32 id;
- } active_lock;
+
+ struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
@@ -190,6 +190,10 @@ struct bpf_verifier_stack_elem {
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
+static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
+ struct bpf_active_lock *lock);
+static int ref_set_non_owning_lock(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
@@ -1073,6 +1077,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose_a("id=%d", reg->id);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
+ if (reg->non_owning_ref_lock.ptr)
+ verbose_a("non_own_id=(%p,%d)", reg->non_owning_ref_lock.ptr,
+ reg->non_owning_ref_lock.id);
if (t != SCALAR_VALUE)
verbose_a("off=%d", reg->off);
if (type_is_pkt_pointer(t))
@@ -5041,7 +5048,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
return -EACCES;
}
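+	/* An allocated object must be either owning (non-zero ref_obj_id)
+	 * or non-owning (non_owning_ref_lock.ptr set); anything else is a
+	 * verifier bug.
+	 */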
- if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
+ if (type_is_alloc(reg->type) && !reg->ref_obj_id &&
+ !reg->non_owning_ref_lock.ptr) {
verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
return -EFAULT;
}
@@ -6031,9 +6039,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
cur->active_lock.ptr = btf;
cur->active_lock.id = reg->id;
} else {
- struct bpf_func_state *fstate = cur_func(env);
void *ptr;
- int i;
if (map)
ptr = map;
@@ -6049,25 +6055,11 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
verbose(env, "bpf_spin_unlock of different lock\n");
return -EINVAL;
}
- cur->active_lock.ptr = NULL;
- cur->active_lock.id = 0;
- for (i = fstate->acquired_refs - 1; i >= 0; i--) {
- int err;
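+		/* Non-owning refs tied to this lock are only valid inside the
+		 * critical section, so clobber them on unlock.
+		 */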
+ invalidate_non_owning_refs(env, &cur->active_lock);
- /* Complain on error because this reference state cannot
- * be freed before this point, as bpf_spin_lock critical
- * section does not allow functions that release the
- * allocated object immediately.
- */
- if (!fstate->refs[i].release_on_unlock)
- continue;
- err = release_reference(env, fstate->refs[i].id);
- if (err) {
- verbose(env, "failed to release release_on_unlock reference");
- return err;
- }
- }
+ cur->active_lock.ptr = NULL;
+ cur->active_lock.id = 0;
}
return 0;
}
@@ -6535,6 +6527,23 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
return 0;
}
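+/* Find a btf_field of one of the types in @fields at offset @off in reg's
+ * btf_record. Returns NULL if reg has no btf_record or no matching field.
+ */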
+static struct btf_field *
+reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
+{
+ struct btf_field *field;
+ struct btf_record *rec;
+
+ rec = reg_btf_record(reg);
+ if (!rec)
+ return NULL;
+
+ field = btf_record_find(rec, off, fields);
+ if (!field)
+ return NULL;
+
+ return field;
+}
+
int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
enum bpf_arg_type arg_type)
@@ -6556,6 +6565,18 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
*/
if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
return 0;
+
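+	/* Allocated object args must normally have zero offset, but graph
+	 * kfuncs may take a pointer to a node or root field within the
+	 * object.
+	 */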
+ if (type == (PTR_TO_BTF_ID | MEM_ALLOC) && reg->off) {
+ if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
+ return __check_ptr_off_reg(env, reg, regno, true);
+
+ verbose(env, "R%d must have zero offset when passed to release func\n",
+ regno);
+ verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
+ kernel_type_name(reg->btf, reg->btf_id), reg->off);
+ return -EINVAL;
+ }
+
/* Doing check_ptr_off_reg check for the offset will catch this
* because fixed_off_ok is false, but checking here allows us
* to give the user a better error message.
@@ -7352,6 +7373,20 @@ static int release_reference(struct bpf_verifier_env *env,
return 0;
}
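+/* Clobber every reg in the current verifier state whose non_owning_ref_lock
+ * matches @lock's identity. Non-owning refs are only valid while their
+ * associated lock is held.
+ */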
+static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
+ struct bpf_active_lock *lock)
+{
+ struct bpf_func_state *unused;
+ struct bpf_reg_state *reg;
+
+ bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
+ if (reg->non_owning_ref_lock.ptr &&
+ reg->non_owning_ref_lock.ptr == lock->ptr &&
+ reg->non_owning_ref_lock.id == lock->id)
+ __mark_reg_unknown(env, reg);
+ }));
+}
+
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
struct bpf_reg_state *regs)
{
@@ -8904,38 +8939,55 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
return 0;
}
-static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
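+/* Mark @reg as a non-owning reference by recording the currently active
+ * lock's identity in reg->non_owning_ref_lock.
+ */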
+static int ref_set_non_owning_lock(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
- struct bpf_func_state *state = cur_func(env);
+ struct bpf_verifier_state *state = env->cur_state;
+
+ if (!state->active_lock.ptr) {
+ verbose(env, "verifier internal error: ref_set_non_owning_lock w/o active lock\n");
+ return -EFAULT;
+ }
+
+ if (reg->non_owning_ref_lock.ptr) {
+ verbose(env, "verifier internal error: non_owning_ref_lock already set\n");
+ return -EFAULT;
+ }
+
+ reg->non_owning_ref_lock.id = state->active_lock.id;
+ reg->non_owning_ref_lock.ptr = state->active_lock.ptr;
+ return 0;
+}
+
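+/* Convert the owning reference with id @ref_obj_id into non-owning
+ * references: clear ref_obj_id on every reg sharing it and tie each reg to
+ * the active lock via non_owning_ref_lock.
+ */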
+static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
+{
+ struct bpf_func_state *state, *unused;
struct bpf_reg_state *reg;
int i;
- /* bpf_spin_lock only allows calling list_push and list_pop, no BPF
- * subprogs, no global functions. This means that the references would
- * not be released inside the critical section but they may be added to
- * the reference state, and the acquired_refs are never copied out for a
- * different frame as BPF to BPF calls don't work in bpf_spin_lock
- * critical sections.
- */
+ state = cur_func(env);
+
if (!ref_obj_id) {
- verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
+ verbose(env, "verifier internal error: ref_obj_id is zero for "
+ "owning -> non-owning conversion\n");
return -EFAULT;
}
+
for (i = 0; i < state->acquired_refs; i++) {
- if (state->refs[i].id == ref_obj_id) {
- if (state->refs[i].release_on_unlock) {
- verbose(env, "verifier internal error: expected false release_on_unlock");
- return -EFAULT;
+ if (state->refs[i].id != ref_obj_id)
+ continue;
+
+ /* Clear ref_obj_id here so release_reference doesn't clobber
+ * the whole reg
+ */
+ bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
+ if (reg->ref_obj_id == ref_obj_id) {
+ reg->ref_obj_id = 0;
+ ref_set_non_owning_lock(env, reg);
}
- state->refs[i].release_on_unlock = true;
- /* Now mark everyone sharing same ref_obj_id as untrusted */
- bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
- if (reg->ref_obj_id == ref_obj_id)
- reg->type |= PTR_UNTRUSTED;
- }));
- return 0;
- }
+ }));
+ return 0;
}
+
verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
return -EFAULT;
}
@@ -9070,7 +9122,6 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
{
const struct btf_type *et, *t;
struct btf_field *field;
- struct btf_record *rec;
u32 list_node_off;
if (meta->btf != btf_vmlinux ||
@@ -9087,9 +9138,8 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
return -EINVAL;
}
- rec = reg_btf_record(reg);
list_node_off = reg->off + reg->var_off.value;
- field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
+ field = reg_find_field_offset(reg, list_node_off, BPF_LIST_NODE);
if (!field || field->offset != list_node_off) {
verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
return -EINVAL;
@@ -9115,8 +9165,8 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
btf_name_by_offset(field->graph_root.btf, et->name_off));
return -EINVAL;
}
- /* Set arg#1 for expiration after unlock */
- return ref_set_release_on_unlock(env, reg->ref_obj_id);
+
+ return 0;
}
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
@@ -9395,11 +9445,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
{
const struct btf_type *t, *func, *func_proto, *ptr_type;
+ u32 i, nargs, func_id, ptr_type_id, release_ref_obj_id;
struct bpf_reg_state *regs = cur_regs(env);
const char *func_name, *ptr_type_name;
bool sleepable, rcu_lock, rcu_unlock;
struct bpf_kfunc_call_arg_meta meta;
- u32 i, nargs, func_id, ptr_type_id;
int err, insn_idx = *insn_idx_p;
const struct btf_param *args;
const struct btf_type *ret_t;
@@ -9494,6 +9544,24 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
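+	/* Pushing a node transfers its ownership to the list: convert all
+	 * regs sharing the node's ref_obj_id to non-owning refs, then
+	 * release the owning reference.
+	 */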
+ if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
+ meta.func_id == special_kfunc_list[KF_bpf_list_push_back]) {
+ release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
+ err = ref_convert_owning_non_owning(env, release_ref_obj_id);
+ if (err) {
+ verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
+ func_name, func_id);
+ return err;
+ }
+
+ err = release_reference(env, release_ref_obj_id);
+ if (err) {
+ verbose(env, "kfunc %s#%d reference has not been acquired before\n",
+ func_name, func_id);
+ return err;
+ }
+ }
+
for (i = 0; i < CALLER_SAVED_REGS; i++)
mark_reg_not_init(env, regs, caller_saved[i]);
This patch introduces non-owning reference semantics to the verifier, specifically linked_list API kfunc handling. release_on_unlock logic for refs is refactored - with small functional changes - to implement these semantics, and bpf_list_push_{front,back} are migrated to use them. When a list node is pushed to a list, the program still has a pointer to the node: n = bpf_obj_new(typeof(*n)); bpf_spin_lock(&l); bpf_list_push_back(&l, n); /* n still points to the just-added node */ bpf_spin_unlock(&l); What the verifier considers n to be after the push, and thus what can be done with n, are changed by this patch. Common properties both before/after this patch: * After push, n is only a valid reference to the node until end of critical section * After push, n cannot be pushed to any list * After push, the program can read the node's fields using n Before: * After push, n retains the ref_obj_id which it received on bpf_obj_new, but the associated bpf_reference_state's release_on_unlock field is set to true * release_on_unlock field and associated logic is used to implement "n is only a valid ref until end of critical section" * After push, n cannot be written to, the node must be removed from the list before writing to its fields * After push, n is marked PTR_UNTRUSTED After: * After push, n's ref is released and ref_obj_id set to 0. The bpf_reg_state's non_owning_ref_lock struct is populated with the currently active lock * non_owning_ref_lock and logic is used to implement "n is only a valid ref until end of critical section" * n can be written to (except for special fields e.g. bpf_list_node, timer, ...) * No special type flag is added to n after push Summary of specific implementation changes to achieve the above: * release_on_unlock field, ref_set_release_on_unlock helper, and logic to "release on unlock" based on that field are removed * The anonymous active_lock struct used by bpf_verifier_state is pulled out into a named struct bpf_active_lock. * A non_owning_ref_lock field of type bpf_active_lock is added to bpf_reg_state's PTR_TO_BTF_ID union * Helpers are added to use non_owning_ref_lock to implement non-owning ref semantics as described above * invalidate_non_owning_refs - helper to clobber all non-owning refs matching a particular bpf_active_lock identity. Replaces release_on_unlock logic in process_spin_lock. * ref_set_non_owning_lock - set non_owning_ref_lock for a reg based on current verifier state * ref_convert_owning_non_owning - convert owning reference w/ specified ref_obj_id to non-owning references. Setup non_owning_ref_lock for each reg with that ref_obj_id and 0 out its ref_obj_id After these changes, linked_list's "release on unlock" logic continues to function as before, except for the semantic differences noted above. The patch immediately following this one makes minor changes to linked_list selftests to account for the differing behavior. Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com> --- include/linux/bpf.h | 1 + include/linux/bpf_verifier.h | 39 ++++----- kernel/bpf/verifier.c | 164 +++++++++++++++++++++++++---------- 3 files changed, 136 insertions(+), 68 deletions(-)