@@ -5380,6 +5380,11 @@ static const char *alloc_obj_fields[] = {
"bpf_refcount",
};
+/* Kernel structures with special BTF fields; entry 0 is an unused placeholder */
+static const char *kstructs_with_special_btf[] = {
+ "unused",
+};
+
static struct btf_struct_metas *
btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
{
@@ -5391,6 +5396,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
} _arr;
} aof;
struct btf_struct_metas *tab = NULL;
+ bool btf_is_base_kernel;
int i, n, id, ret;
BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
@@ -5412,16 +5418,25 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
return NULL;
sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
- n = btf_nr_types(btf);
+ btf_is_base_kernel = btf_is_kernel(btf) && !btf_is_module(btf);
+ n = btf_is_base_kernel ? ARRAY_SIZE(kstructs_with_special_btf) : btf_nr_types(btf);
for (i = 1; i < n; i++) {
struct btf_struct_metas *new_tab;
const struct btf_member *member;
struct btf_struct_meta *type;
struct btf_record *record;
const struct btf_type *t;
- int j, tab_cnt;
+ int j, tab_cnt, id;
- t = btf_type_by_id(btf, i);
+ id = btf_is_base_kernel ?
+ btf_find_by_name_kind(btf, kstructs_with_special_btf[i],
+ BTF_KIND_STRUCT) : i;
+ if (id < 0) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ t = btf_type_by_id(btf, id);
if (!t) {
ret = -EINVAL;
goto free;
@@ -5449,7 +5464,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
tab = new_tab;
type = &tab->types[tab->cnt];
- type->btf_id = i;
+ type->btf_id = id;
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
BPF_KPTR, t->size);
@@ -5967,6 +5982,7 @@ BTF_ID(struct, bpf_ctx_convert)
struct btf *btf_parse_vmlinux(void)
{
+ struct btf_struct_metas *struct_meta_tab;
struct btf_verifier_env *env = NULL;
struct bpf_verifier_log *log;
struct btf *btf = NULL;
@@ -6009,6 +6025,23 @@ struct btf *btf_parse_vmlinux(void)
if (err)
goto errout;
+ struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
+ if (IS_ERR(struct_meta_tab)) {
+ err = PTR_ERR(struct_meta_tab);
+ goto errout;
+ }
+ btf->struct_meta_tab = struct_meta_tab;
+
+ if (struct_meta_tab) {
+ int i;
+
+ for (i = 0; i < struct_meta_tab->cnt; i++) {
+ err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
+ if (err < 0)
+ goto errout_meta;
+ }
+ }
+
/* btf_parse_vmlinux() runs under bpf_verifier_lock */
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
@@ -6021,6 +6054,8 @@ struct btf *btf_parse_vmlinux(void)
btf_verifier_env_free(env);
return btf;
+errout_meta:
+ btf_free_struct_meta_tab(btf);
errout:
btf_verifier_env_free(env);
if (btf) {
@@ -6034,6 +6069,7 @@ struct btf *btf_parse_vmlinux(void)
static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
{
+ struct btf_struct_metas *struct_meta_tab;
struct btf_verifier_env *env = NULL;
struct bpf_verifier_log *log;
struct btf *btf = NULL, *base_btf;
@@ -6091,10 +6127,29 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u
if (err)
goto errout;
+ struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
+ if (IS_ERR(struct_meta_tab)) {
+ err = PTR_ERR(struct_meta_tab);
+ goto errout;
+ }
+ btf->struct_meta_tab = struct_meta_tab;
+
+ if (struct_meta_tab) {
+ int i;
+
+ for (i = 0; i < struct_meta_tab->cnt; i++) {
+ err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
+ if (err < 0)
+ goto errout_meta;
+ }
+ }
+
btf_verifier_env_free(env);
refcount_set(&btf->refcnt, 1);
return btf;
+errout_meta:
+ btf_free_struct_meta_tab(btf);
errout:
btf_verifier_env_free(env);
if (btf) {
Currently, only program BTF from the user may contain special BTF fields (e.g., bpf_list_head, bpf_spin_lock, and bpf_timer). To support adding kernel objects to collections, we will need special BTF fields (i.e., graph nodes) in kernel structures as well. This patch takes the first step by finding these fields and building metadata for kernel BTF. Unlike parsing program BTF, where we walk all types, an allowlist of kernel structures that contain special BTF fields is used, to avoid wasting time parsing the many kernel types that have no special BTF fields.

Signed-off-by: Amery Hung <amery.hung@bytedance.com>
---
 kernel/bpf/btf.c | 63 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 59 insertions(+), 4 deletions(-)
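
A note for reviewers, not part of the patch: a rough sketch of how the allowlist and the resulting metadata are meant to be used once a real kernel structure needs special BTF fields. The structure name below is hypothetical; btf_find_struct_meta() and btf_record_has_field() are the existing helpers that would consume the table built here.

	/* A later patch would replace the "unused" placeholder with real entries: */
	static const char *kstructs_with_special_btf[] = {
		"foo_kernel_struct",	/* hypothetical struct embedding a bpf_list_node */
	};

	/* Verifier-side code could then look up the prebuilt metadata by BTF id: */
	struct btf_struct_meta *meta = btf_find_struct_meta(btf, btf_id);

	if (meta && btf_record_has_field(meta->record, BPF_LIST_NODE)) {
		/* the kernel object can be linked into a bpf_list_head */
	}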