@@ -419,6 +419,9 @@ enum bpf_type_flag {
CONDITIONAL_RELEASE = BIT(12 + BPF_BASE_TYPE_BITS),
+ PTR_ITER = BIT(13 + BPF_BASE_TYPE_BITS),
+ PTR_ITER_END = BIT(14 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
@@ -5433,6 +5433,34 @@ union bpf_attr {
*
* Return
* 0
+ *
+ * void *bpf_rbtree_first(struct bpf_map *map)
+ * Description
+ * Return the first node in the tree according to sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the first node, or NULL if the tree is empty or the lock is not held.
+ *
+ * void *bpf_rbtree_last(struct bpf_map *map)
+ * Description
+ * Return the last node in the tree according to sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the last node, or NULL if the tree is empty or the lock is not held.
+ *
+ * void *bpf_rbtree_next(struct bpf_map *map, void *cur)
+ * Description
+ * Return the node following *cur* in sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the next node, or NULL if *cur* is the last node or the lock is not held.
+ *
+ * void *bpf_rbtree_prev(struct bpf_map *map, void *cur)
+ * Description
+ * Return the node preceding *cur* in sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the previous node, or NULL if *cur* is the first node or the lock is not held.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5652,6 +5680,10 @@ union bpf_attr {
FN(rbtree_get_lock), \
FN(rbtree_lock), \
FN(rbtree_unlock), \
+ FN(rbtree_first), \
+ FN(rbtree_last), \
+ FN(rbtree_next), \
+ FN(rbtree_prev), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -1608,6 +1608,10 @@ const struct bpf_func_proto bpf_rbtree_free_node_proto __weak;
const struct bpf_func_proto bpf_rbtree_get_lock_proto __weak;
const struct bpf_func_proto bpf_rbtree_lock_proto __weak;
const struct bpf_func_proto bpf_rbtree_unlock_proto __weak;
+const struct bpf_func_proto bpf_rbtree_first_proto __weak;
+const struct bpf_func_proto bpf_rbtree_last_proto __weak;
+const struct bpf_func_proto bpf_rbtree_next_proto __weak;
+const struct bpf_func_proto bpf_rbtree_prev_proto __weak;
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
@@ -1715,6 +1719,14 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_rbtree_lock_proto;
case BPF_FUNC_rbtree_unlock:
return &bpf_rbtree_unlock_proto;
+ case BPF_FUNC_rbtree_first:
+ return &bpf_rbtree_first_proto;
+ case BPF_FUNC_rbtree_last:
+ return &bpf_rbtree_last_proto;
+ case BPF_FUNC_rbtree_next:
+ return &bpf_rbtree_next_proto;
+ case BPF_FUNC_rbtree_prev:
+ return &bpf_rbtree_prev_proto;
default:
break;
}
@@ -174,6 +174,84 @@ const struct bpf_func_proto bpf_rbtree_find_proto = {
.arg3_type = ARG_PTR_TO_FUNC,
};
+BPF_CALL_1(bpf_rbtree_first, struct bpf_map *, map)
+{
+ struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+
+ if (!__rbtree_lock_held(tree))
+ return (u64)NULL;
+
+ return (u64)rb_first_cached(&tree->root);
+}
+
+const struct bpf_func_proto bpf_rbtree_first_proto = {
+ .func = bpf_rbtree_first,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL | PTR_ITER | OBJ_NON_OWNING_REF,
+ .ret_btf_id = BPF_PTR_POISON,
+ .arg1_type = ARG_CONST_MAP_PTR,
+};
+
+BPF_CALL_1(bpf_rbtree_last, struct bpf_map *, map)
+{
+ struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+
+ if (!__rbtree_lock_held(tree))
+ return (u64)NULL;
+
+ return (u64)rb_last(&tree->root.rb_root);
+}
+
+const struct bpf_func_proto bpf_rbtree_last_proto = {
+ .func = bpf_rbtree_last,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL | PTR_ITER | OBJ_NON_OWNING_REF,
+ .ret_btf_id = BPF_PTR_POISON,
+ .arg1_type = ARG_CONST_MAP_PTR,
+};
+
+BPF_CALL_2(bpf_rbtree_next, struct bpf_map *, map, void *, cur)
+{
+ struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+ struct rb_node *node = (struct rb_node *)cur;
+
+ if (!__rbtree_lock_held(tree))
+ return (u64)NULL;
+
+ return (u64)rb_next(node);
+}
+
+const struct bpf_func_proto bpf_rbtree_next_proto = {
+ .func = bpf_rbtree_next,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END | OBJ_NON_OWNING_REF,
+ .ret_btf_id = BPF_PTR_POISON,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID | PTR_ITER,
+ .arg2_btf_id = BPF_PTR_POISON,
+};
+
+BPF_CALL_2(bpf_rbtree_prev, struct bpf_map *, map, void *, cur)
+{
+ struct rb_node *node = (struct rb_node *)cur;
+ struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+
+ if (!__rbtree_lock_held(tree))
+ return (u64)NULL;
+
+ return (u64)rb_prev(node);
+}
+
+const struct bpf_func_proto bpf_rbtree_prev_proto = {
+ .func = bpf_rbtree_prev,
+ .gpl_only = true,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END | OBJ_NON_OWNING_REF,
+ .ret_btf_id = BPF_PTR_POISON,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID | PTR_ITER,
+ .arg2_btf_id = BPF_PTR_POISON,
+};
+
/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
* 'rb_node *', so field name of rb_node within containing struct is not
* needed.
@@ -484,6 +484,11 @@ static bool type_may_be_null(u32 type)
return type & PTR_MAYBE_NULL;
}
+static bool type_is_iter(u32 type)
+{
+ return type & (PTR_ITER | PTR_ITER_END);
+}
+
static bool is_acquire_function(enum bpf_func_id func_id,
const struct bpf_map *map)
{
@@ -586,7 +591,9 @@ static bool function_manipulates_rbtree_node(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_rbtree_add ||
func_id == BPF_FUNC_rbtree_remove ||
- func_id == BPF_FUNC_rbtree_free_node;
+ func_id == BPF_FUNC_rbtree_free_node ||
+ func_id == BPF_FUNC_rbtree_next ||
+ func_id == BPF_FUNC_rbtree_prev;
}
static bool function_returns_rbtree_node(enum bpf_func_id func_id)
@@ -594,7 +601,11 @@ static bool function_returns_rbtree_node(enum bpf_func_id func_id)
return func_id == BPF_FUNC_rbtree_alloc_node ||
func_id == BPF_FUNC_rbtree_find ||
func_id == BPF_FUNC_rbtree_add ||
- func_id == BPF_FUNC_rbtree_remove;
+ func_id == BPF_FUNC_rbtree_remove ||
+ func_id == BPF_FUNC_rbtree_first ||
+ func_id == BPF_FUNC_rbtree_last ||
+ func_id == BPF_FUNC_rbtree_next ||
+ func_id == BPF_FUNC_rbtree_prev;
}
/* string representation of 'enum bpf_reg_type'
@@ -655,6 +666,12 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
32 - postfix_idx);
}
+ if (type_is_iter(type)) {
+ postfix_idx += strlcpy(postfix + postfix_idx, "_iter", 32 - postfix_idx);
+ if (type & PTR_ITER_END)
+ postfix_idx += strlcpy(postfix + postfix_idx, "_end", 32 - postfix_idx);
+ }
+
if (type & MEM_RDONLY)
strncpy(prefix, "rdonly_", 32);
if (type & MEM_ALLOC)
@@ -1470,7 +1487,7 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
map->map_type == BPF_MAP_TYPE_SOCKHASH) {
reg->type = PTR_TO_SOCKET;
} else {
- reg->type = PTR_TO_MAP_VALUE;
+ reg->type &= ~PTR_MAYBE_NULL;
}
return;
}
@@ -3063,6 +3080,11 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}
+static bool __is_iter_end(const struct bpf_reg_state *reg)
+{
+ return reg->type & PTR_ITER_END;
+}
+
static void save_register_state(struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg,
int size)
@@ -5737,6 +5759,8 @@ static const struct bpf_reg_types map_key_value_types = {
PTR_TO_PACKET_META,
PTR_TO_MAP_KEY,
PTR_TO_MAP_VALUE,
+ PTR_TO_MAP_VALUE | PTR_ITER,
+ PTR_TO_MAP_VALUE | PTR_ITER_END,
},
};
@@ -5870,6 +5894,17 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
if (arg_type & PTR_MAYBE_NULL)
type &= ~PTR_MAYBE_NULL;
+ /* TYPE | PTR_ITER is a valid input for helpers that expect TYPE,
+ * but plain TYPE is not a valid input for helpers that expect
+ * TYPE | PTR_ITER or TYPE | PTR_ITER_END.
+ */
+ if (type_is_iter(arg_type)) {
+ if (!type_is_iter(type)) {
+ /* initialize 'i' for the expected-type printout below */
+ for (i = 0; i < ARRAY_SIZE(compatible->types); i++)
+ if (compatible->types[i] == NOT_INIT)
+ break;
+ goto not_found;
+ }
+
+ type &= ~(PTR_ITER | PTR_ITER_END);
+ }
+
for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
expected = compatible->types[i];
if (expected == NOT_INIT)
@@ -5879,6 +5914,7 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
goto found;
}
+not_found:
verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
for (j = 0; j + 1 < i; j++)
verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
@@ -9933,6 +9969,17 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
bool is_jmp32)
{
if (__is_pointer_value(false, reg)) {
+ if (__is_iter_end(reg) && val == 0) {
+ __mark_reg_const_zero(reg);
+ switch (opcode) {
+ case BPF_JEQ:
+ return 1;
+ case BPF_JNE:
+ return 0;
+ default:
+ return -1;
+ }
+ }
if (!reg_type_not_null(reg->type))
return -1;
@@ -5433,6 +5433,34 @@ union bpf_attr {
*
* Return
* 0
+ *
+ * void *bpf_rbtree_first(struct bpf_map *map)
+ * Description
+ * Return the first node in the tree according to sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the first node, or NULL if the tree is empty or the lock is not held.
+ *
+ * void *bpf_rbtree_last(struct bpf_map *map)
+ * Description
+ * Return the last node in the tree according to sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the last node, or NULL if the tree is empty or the lock is not held.
+ *
+ * void *bpf_rbtree_next(struct bpf_map *map, void *cur)
+ * Description
+ * Return the node following *cur* in sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the next node, or NULL if *cur* is the last node or the lock is not held.
+ *
+ * void *bpf_rbtree_prev(struct bpf_map *map, void *cur)
+ * Description
+ * Return the node preceding *cur* in sort order. The tree's lock must be held.
+ *
+ * Return
+ * A pointer to the previous node, or NULL if *cur* is the first node or the lock is not held.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5652,6 +5680,10 @@ union bpf_attr {
FN(rbtree_get_lock), \
FN(rbtree_lock), \
FN(rbtree_unlock), \
+ FN(rbtree_first), \
+ FN(rbtree_last), \
+ FN(rbtree_next), \
+ FN(rbtree_prev), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

Add two type flags, PTR_ITER and PTR_ITER_END, meant to support the
following pattern for iterating data structures:

  struct node *elem = data_structure_iter_first(&some_map);
  while (elem) {
          do_something();
          elem = data_structure_iter_next(&some_map, elem);
  }

Currently the ret_type of both the _first() and _next() helpers would be
PTR_TO_BTF_ID_OR_NULL, and the verifier would consider the loop to be
infinite as it knows nothing about an arbitrary PTR_TO_BTF_ID value.

However, if we can express "this PTR_TO_BTF_ID will eventually be NULL if
used in iteration helpers" via a new type flag, the verifier can use this
information to determine that the loop will terminate while still
verifying the loop body.

So for the contrived example above, _first() now returns a
PTR_TO_BTF_ID_OR_NULL | PTR_ITER, which _next() expects as input, itself
returning PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END.

The verifier does nothing special for PTR_ITER, so the first iteration of
the example loop results in both the elem == NULL and elem != NULL
branches being verified. When verifying the latter branch, elem becomes a
PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END.

When the verifier sees a reg holding PTR_ITER_END in a conditional jump,
it knows the reg's type and value can be replaced with SCALAR 0. So on the
second iteration of the example loop, elem == NULL is assumed and
verification continues with that branch only.

[ TODO:
  * PTR_ITER needs to be associated with the helper that produced it, to
    prevent something like:

      struct node *elem = data_structure_iter_last(&some_map);
      while (elem) {
              do_something();
              elem = data_structure_iter_next(&some_map, elem);
      }

    i.e. _first() and _next() should be used together, same with _last()
    and _prev().

  * This is currently very unsafe. Per multiple conversations w/ Alexei
    and Andrii, there are probably some ways forward here, but they may be
    more complex than they're worth. If so, this can migrate to a
    callback-based approach with similar functionality.
]

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/linux/bpf.h            |  3 ++
 include/uapi/linux/bpf.h       | 32 ++++++++++++++
 kernel/bpf/helpers.c           | 12 ++++++
 kernel/bpf/rbtree.c            | 78 ++++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c          | 53 +++++++++++++++++++++--
 tools/include/uapi/linux/bpf.h | 32 ++++++++++++++
 6 files changed, 207 insertions(+), 3 deletions(-)
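
For illustration only (not part of this patch): a rough sketch of how a BPF
program might drive the new iteration helpers. It assumes the
BPF_MAP_TYPE_RBTREE map type and the rbtree lock helpers from earlier
patches in this series, a vmlinux.h that provides struct rb_node, a node
layout with the rb_node embedded at offset 0, and hand-rolled helper
declarations whose numeric IDs are placeholders for whatever
__BPF_FUNC_MAPPER ends up assigning; a real program would take these from a
regenerated bpf_helper_defs.h.

  /* Illustrative sketch, not part of the patch. */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  /* Node type stored in the rbtree map; rb_node assumed at offset 0, since
   * the helpers cast between node and rb_node pointers directly.
   */
  struct node_data {
  	struct rb_node node;
  	__u32 key;
  	__u32 val;
  };

  struct {
  	__uint(type, BPF_MAP_TYPE_RBTREE);	/* map type added earlier in the series */
  	__type(value, struct node_data);
  } rbtree SEC(".maps");

  /* Placeholder helper declarations; the numeric IDs are illustrative only. */
  static void *(*bpf_rbtree_first)(void *map) = (void *)210;
  static void *(*bpf_rbtree_next)(void *map, void *cur) = (void *)211;

  __u64 tree_sum;

  SEC("tc")
  int sum_nodes(struct __sk_buff *skb)
  {
  	struct node_data *elem;

  	/* The tree's lock must be held around the walk, via the
  	 * rbtree_get_lock()/rbtree_lock() helpers added earlier in the
  	 * series (calls elided here).
  	 */

  	/* _first() yields PTR_TO_BTF_ID_OR_NULL | PTR_ITER; _next() yields
  	 * ... | PTR_ITER_END, which the verifier treats as NULL on its second
  	 * pass through the loop, so verification terminates.
  	 */
  	for (elem = bpf_rbtree_first(&rbtree); elem;
  	     elem = bpf_rbtree_next(&rbtree, elem))
  		tree_sum += elem->val;

  	return 0;
  }

  char LICENSE[] SEC("license") = "GPL";

As the TODO notes, _first()/_next() (and _last()/_prev()) are intended to be
used as matched pairs.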
Add two type flags, PTR_ITER and PTR_ITER_END, meant to support the following pattern for iterating data structures: struct node *elem = data_structure_iter_first(&some_map) while (elem) { do_something(); elem = data_structure_iter_next(&some_map, elem); } Currently the ret_type of both _first() and _next() helpers would be PTR_TO_BTF_ID_OR_NULL and the verifier would consider the loop to be infinite as it knows nothing about an arbitrary PTR_TO_BTF_ID value. However, if we can express "this PTR_TO_BTF_ID will eventually be NULL if used in iteration helpers" via a new type flag, the verifier can use this information to determine that the loop will terminate while still verifying the loop body. So for our contrived example above, _first() now returns a PTR_TO_BTF_ID_OR_NULL | PTR_ITER, which _next() expects as input, itself returning PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END. The verifier does nothing special for PTR_ITER, so the first iteration of the example loop will result in both elem == NULL and elem != NULL branches being verified. When verifying the latter branch, elem will become a PTR_TO_BTF_ID_OR_NULL | PTR_ITER_END. When the verifier sees a reg holding PTR_ITER_END in a conditional jump it knows the reg type and val can be replaced with SCALAR 0. So in the example loop on 2nd iteration elem == NULL will be assumed and verification will continue with that branch only. [ TODO: * PTR_ITER needs to be associated with the helper that produced it, to prevent something like: struct node *elem = data_structure_iter_last(&some_map) while (elem) { do_something(); elem = data_structure_iter_next(&some_map, elem); } i.e. _first() and _next() should be used together, same with _last() and _prev(). * This is currently very unsafe. Per multiple conversations w/ Alexei and Andrii, there are probably some ways forward here, but may be more complex than it's worth. If so, can migrate to a callback-based approach with similar functionality . ] Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com> --- include/linux/bpf.h | 3 ++ include/uapi/linux/bpf.h | 32 ++++++++++++++ kernel/bpf/helpers.c | 12 ++++++ kernel/bpf/rbtree.c | 78 ++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 53 +++++++++++++++++++++-- tools/include/uapi/linux/bpf.h | 32 ++++++++++++++ 6 files changed, 207 insertions(+), 3 deletions(-)