@@ -133,6 +133,9 @@ static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops,
}
sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
+ /* XXX: This ID is not unique across modules. We need to include
+ * the module name or module ID to form a unique ID.
+ */
value_id = btf_find_by_name_kind(btf, value_name,
BTF_KIND_STRUCT);
if (value_id < 0) {
@@ -141,6 +144,9 @@ static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops,
return;
}
+ /* XXX: This ID is not unique across modules. We need to include
+ * the module name or module ID to form a unique ID.
+ */
type_id = btf_find_by_name_kind(btf, st_ops->name,
BTF_KIND_STRUCT);
if (type_id < 0) {
@@ -569,6 +575,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
u32 moff;
moff = __btf_member_bit_offset(t, member) / 8;
+ /* XXX: Should resolve member types from module BTF, but
+ * it's not available yet.
+ */
ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
if (ptype == module_type) {
if (*(void **)(udata + moff))
@@ -837,6 +846,12 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
if (!st_map)
return ERR_PTR(-ENOMEM);
+ /* XXX: should sync with the unregister path */
+ /* XXX: Since we assign an st_ops, we need to do a synchronize_rcu()
+ * twice to make sure the st_ops is not freed while other
+ * tasks use this value. Or, we can find st_ops again while holding
+ * the mutex to make sure it is not freed.
+ */
st_map->st_ops = st_ops;
map = &st_map->map;
@@ -1204,6 +1204,10 @@ static int map_create(union bpf_attr *attr)
return -EPERM;
}
+ /* XXX: attr->attach_btf_obj_fd should be initialized by user
+ * space. We should use it to find type info from
+ * attach_btf_id.
+ */
map = ops->map_alloc(attr);
if (IS_ERR(map))
return PTR_ERR(map);
@@ -2624,6 +2628,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
btf_get(attach_btf);
}
+
bpf_prog_load_fixup_attach_type(attr);
if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
attach_btf, attr->attach_btf_id,
@@ -4576,6 +4581,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
info.btf_value_type_id = map->btf_value_type_id;
}
info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ /* XXX: copy map->mod_btf->name as well? */
if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_info_fill(&info, map);
@@ -988,6 +988,7 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
* find "struct bpf_struct_ops_tcp_congestion_ops" from the
* btf_vmlinux.
*/
+ /* XXX: Should search module BTFs as well. */
kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
tname, BTF_KIND_STRUCT);
if (kern_vtype_id < 0) {
@@ -5143,6 +5144,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
+int turnon_kk = false;
+
static void bpf_map__destroy(struct bpf_map *map);
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
@@ -7945,13 +7948,32 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
err = bpf_object__probe_loading(obj);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
+ /* XXX: should use the correct module BTF here if needed.
+ * obj->btf_vmlinux provides the information about members of
+ * the struct_ops type required to load the object.
+ * (see bpf_object__init_kern_struct_ops_maps() and
+ * bpf_map__init_kern_struct_ops())
+ */
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
+ /* XXX: obj->btf_vmlinux is not used for loading the object. */
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
err = err ? : bpf_object__create_maps(obj);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+ if (turnon_kk)
+ printf("bpf_object__probe_loading err=%d\n", err);
err = err ? : bpf_object__load_progs(obj, extra_log_level);
err = err ? : bpf_object_init_prog_arrays(obj);
err = err ? : bpf_object_prepare_struct_ops(obj);
@@ -9230,6 +9252,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
* attach_btf_id and member_idx
*/
if (!prog->attach_btf_id) {
+ /* XXX: attach_btf_obj_fd is needed as well */
prog->attach_btf_id = st_ops->type_id;
prog->expected_attach_type = member_idx;
}
@@ -13124,7 +13147,9 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
+ printf("Loading BPF skeleton '%s'...\n", s->name);
err = bpf_object__load(*s->obj);
+ printf("bpf_object__load\n");
if (err) {
pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13169,6 +13194,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
}
+ printf("BPF skeleton '%s' loaded successfully\n", s->name);
return 0;
}