@@ -1950,7 +1950,7 @@ static int __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head
 		INIT_LIST_HEAD(h);
 	if (!list_empty(n)) {
 		/* Only called from BPF prog, no need to migrate_disable */
-		__bpf_obj_drop_impl(n - off, rec);
+		__bpf_obj_drop_impl((void *)n - off, rec);
 		return -EINVAL;
 	}
@@ -2032,7 +2032,7 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
 	if (!RB_EMPTY_NODE(n)) {
 		/* Only called from BPF prog, no need to migrate_disable */
-		__bpf_obj_drop_impl(n - off, rec);
+		__bpf_obj_drop_impl((void *)n - off, rec);
 		return -EINVAL;
 	}
Given the pointer to the struct bpf_{rb,list}_node within a local kptr and the byte offset
of that field within the kptr struct, the calculation changed by this patch is meant to
find the beginning of the kptr so that it can be passed to bpf_obj_drop.

Unfortunately, instead of doing

  ptr_to_kptr = ptr_to_node_field - offset_bytes

the calculation is erroneously doing

  ptr_to_kptr = ptr_to_node_field - (offset_bytes * sizeof(struct bpf_rb_node))

or the bpf_list_node equivalent, because the subtraction is performed on a typed pointer
and is therefore scaled by the pointee size. This patch fixes the calculation by casting
the node pointer to void * before subtracting the offset.

Fixes: d2dcc67df910 ("bpf: Migrate bpf_rbtree_add and bpf_list_push_{front,back} to possibly fail")
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 kernel/bpf/helpers.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
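
For reviewers, a minimal userspace sketch of why the cast matters (the struct and
variable names below are made up for illustration and are not part of the patch; only
the scaled-vs-byte subtraction mirrors the kernel code). Subtraction on a typed pointer
is scaled by the pointee size, so "n - off" walks back off * sizeof(*n) bytes, while
casting to void * first (a GNU C extension the kernel relies on) makes it plain byte
arithmetic:

  #include <stdio.h>
  #include <stddef.h>

  struct node { void *l, *r, *p; };       /* stand-in for struct bpf_rb_node */

  struct obj {                            /* stand-in for a local kptr */
          long key;
          struct node n;                  /* field at byte offset 'off' */
  };

  int main(void)
  {
          struct obj o;
          size_t off = offsetof(struct obj, n);
          struct node *n = &o.n;

          void *wrong = (void *)(n - off); /* scaled: back off * sizeof(*n) bytes */
          void *right = (void *)n - off;   /* byte arithmetic: back to &o */

          printf("&o=%p wrong=%p right=%p\n", (void *)&o, wrong, right);
          return 0;
  }

Only "right" lands back on &o; "wrong" overshoots by a factor of sizeof(struct node),
which is exactly the bug the two hunks above fix.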