--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1610,6 +1610,7 @@ typedef const struct bpf_func_proto *
enum bpf_iter_feature {
BPF_ITER_RESCHED = BIT(0),
+ BPF_ITER_INHERIT = BIT(1),
};
#define BPF_ITER_CTX_ARG_MAX 2
@@ -1647,6 +1648,7 @@ bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
+bool bpf_link_support_inherit(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -8,7 +8,7 @@ CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
-obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
+obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o cgroup_view_iter.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -496,6 +496,17 @@ bool bpf_link_is_iter(struct bpf_link *link)
return link->ops == &bpf_iter_link_lops;
}
+bool bpf_link_support_inherit(struct bpf_link *link)
+{
+ struct bpf_iter_link *iter_link;
+
+ if (!bpf_link_is_iter(link))
+ return false;
+
+ iter_link = container_of(link, struct bpf_iter_link, link);
+ return iter_link->tinfo->reg_info->feature & BPF_ITER_INHERIT;
+}
+
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
struct bpf_prog *prog)
{
new file mode 100644
--- /dev/null
+++ b/kernel/bpf/cgroup_view_iter.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2022 Google */
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/filter.h>
+#include <linux/kernel.h>
+#include <linux/btf_ids.h>
+#include <linux/cgroup.h>
+#include <linux/kernfs.h>
+#include "inode.h"
+
+static void *cgroup_view_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct bpf_dir_tag *tag;
+ struct kernfs_node *kn;
+ struct cgroup *cgroup;
+ struct inode *dir;
+
+ /* Only one session is supported. */
+ if (*pos > 0)
+ return NULL;
+
+ dir = d_inode(seq->file->f_path.dentry->d_parent);
+ tag = dir->i_private;
+ if (!tag)
+ return NULL;
+
+ kn = tag->private;
+
+ rcu_read_lock();
+ cgroup = rcu_dereference(*(void __rcu __force **)&kn->priv);
+ if (!cgroup || !cgroup_tryget(cgroup))
+ cgroup = NULL;
+ rcu_read_unlock();
+
+ if (!cgroup)
+ return NULL;
+
+ if (*pos == 0)
+ ++*pos;
+ return cgroup;
+}
+
+static void *cgroup_view_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ /* Single object per session: drop the reference taken in ->start(). */
+ ++*pos;
+ cgroup_put(v);
+ return NULL;
+}
+
+struct bpf_iter__cgroup_view {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct cgroup *, cgroup);
+};
+
+DEFINE_BPF_ITER_FUNC(cgroup_view, struct bpf_iter_meta *meta, struct cgroup *cgroup)
+
+static int cgroup_view_seq_show(struct seq_file *seq, void *v)
+{
+ struct bpf_iter__cgroup_view ctx;
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+ int ret = 0;
+
+ ctx.meta = &meta;
+ ctx.cgroup = v;
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, false);
+ if (prog)
+ ret = bpf_iter_run_prog(prog, &ctx);
+
+ return ret;
+}
+
+static void cgroup_view_seq_stop(struct seq_file *seq, void *v)
+{
+ if (v)
+ cgroup_put(v);
+}
+
+static const struct seq_operations cgroup_view_seq_ops = {
+ .start = cgroup_view_seq_start,
+ .next = cgroup_view_seq_next,
+ .stop = cgroup_view_seq_stop,
+ .show = cgroup_view_seq_show,
+};
+
+BTF_ID_LIST(btf_cgroup_id)
+BTF_ID(struct, cgroup)
+
+static const struct bpf_iter_seq_info cgroup_view_seq_info = {
+ .seq_ops = &cgroup_view_seq_ops,
+ .init_seq_private = NULL,
+ .fini_seq_private = NULL,
+ .seq_priv_size = 0,
+};
+
+static struct bpf_iter_reg cgroup_view_reg_info = {
+ .target = "cgroup_view",
+ .feature = BPF_ITER_INHERIT,
+ .ctx_arg_info_size = 1,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__cgroup_view, cgroup),
+ PTR_TO_BTF_ID },
+ },
+ .seq_info = &cgroup_view_seq_info,
+};
+
+static int __init cgroup_view_init(void)
+{
+ cgroup_view_reg_info.ctx_arg_info[0].btf_id = *btf_cgroup_id;
+ return bpf_iter_reg_target(&cgroup_view_reg_info);
+}
+
+late_initcall(cgroup_view_init);
Introduce a new type of iter prog: 'cgroup_view'. It prints out a cgroup's
state. cgroup_view is meant to be used together with directory tagging.
When a cgroup_view is pinned in a directory, it tags that directory as
KERNFS_REP, i.e. a replica of the cgroup hierarchy. Whenever a
subdirectory is created, if a child cgroup of the same name exists, the
subdirectory inherits the pinned cgroup_view object from its parent and
holds a reference on the corresponding kernfs node.

The cgroup_view prog takes a pointer to the cgroup and can use the
bpf_seq_printf() family of helpers to print out cgroup state. A typical
use case of cgroup_view is to extend the cgroupfs interface.

Signed-off-by: Hao Luo <haoluo@google.com>
---
 include/linux/bpf.h           |   2 +
 kernel/bpf/Makefile           |   2 +-
 kernel/bpf/bpf_iter.c         |  11 ++++
 kernel/bpf/cgroup_view_iter.c | 116 ++++++++++++++++++++++++++++++++++
 4 files changed, 130 insertions(+), 1 deletion(-)
 create mode 100644 kernel/bpf/cgroup_view_iter.c
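
For illustration only (not part of this diff): a minimal sketch of what a
cgroup_view program could look like, assuming the usual vmlinux.h + libbpf
build setup used by the BPF selftests. The program name (dump_cgroup_state)
and the fields it prints (kn->id, nr_descendants) are arbitrary examples;
the only fixed piece is the SEC("iter/cgroup_view") section name, which has
to match the target registered above.

  // SPDX-License-Identifier: GPL-2.0-only
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>	/* BPF_SEQ_PRINTF() */

  char _license[] SEC("license") = "GPL";

  SEC("iter/cgroup_view")
  int dump_cgroup_state(struct bpf_iter__cgroup_view *ctx)
  {
  	struct seq_file *seq = ctx->meta->seq;
  	struct cgroup *cgroup = ctx->cgroup;

  	if (!cgroup)
  		return 0;

  	/* Dump a couple of fields of the cgroup backing this directory. */
  	BPF_SEQ_PRINTF(seq, "id: %llu\n", cgroup->kn->id);
  	BPF_SEQ_PRINTF(seq, "nr_descendants: %d\n", cgroup->nr_descendants);
  	return 0;
  }

Once loaded, the resulting iter link would be pinned into a bpffs directory
(e.g. with 'bpftool iter pin'); with the tagging and inheritance described
above, reading the mirrored file in a subdirectory runs the program against
the corresponding child cgroup.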