--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -48,6 +48,7 @@ enum cgroup_bpf_attach_type {
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
+ CGROUP_TRACEPOINT,
MAX_CGROUP_BPF_ATTACH_TYPE
};
@@ -81,6 +82,7 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ CGROUP_ATYPE(CGROUP_TRACEPOINT);
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
}
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -57,6 +57,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl,
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt,
struct bpf_sockopt, struct bpf_sockopt_kern)
#endif
+#if defined(CONFIG_BPF_EVENTS) && defined(CONFIG_CGROUP_BPF)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_TRACEPOINT, cg_tracepoint,
+ __u64, u64)
+#endif
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
__u32, u32)
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -949,6 +949,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
+ BPF_PROG_TYPE_CGROUP_TRACEPOINT,
};
enum bpf_attach_type {
@@ -994,6 +995,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
+ BPF_CGROUP_TRACEPOINT,
__MAX_BPF_ATTACH_TYPE
};
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2149,6 +2149,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
case BPF_PROG_TYPE_LSM:
case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
case BPF_PROG_TYPE_EXT: /* extends any prog */
+ case BPF_PROG_TYPE_CGROUP_TRACEPOINT:
return true;
default:
return false;
@@ -3137,6 +3138,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
return BPF_PROG_TYPE_SK_LOOKUP;
case BPF_XDP:
return BPF_PROG_TYPE_XDP;
+ case BPF_CGROUP_TRACEPOINT:
+ return BPF_PROG_TYPE_CGROUP_TRACEPOINT;
default:
return BPF_PROG_TYPE_UNSPEC;
}
@@ -3189,6 +3192,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_CGROUP_TRACEPOINT:
case BPF_PROG_TYPE_SOCK_OPS:
ret = cgroup_bpf_prog_attach(attr, ptype, prog);
break;
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1798,6 +1798,14 @@ const struct bpf_verifier_ops perf_event_verifier_ops = {
const struct bpf_prog_ops perf_event_prog_ops = {
};
+const struct bpf_verifier_ops cg_tracepoint_verifier_ops = {
+ .get_func_proto = tp_prog_func_proto,
+ .is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops cg_tracepoint_prog_ops = {
+};
+
static DEFINE_MUTEX(bpf_event_mutex);
#define BPF_TRACE_MAX_PROGS 64
Change-Id: Ic9727186bb8c76c757e48635143b16e607f2299f
Signed-off-by: Kenny Ho <Kenny.Ho@amd.com>
---
 include/linux/bpf-cgroup.h | 2 ++
 include/linux/bpf_types.h  | 4 ++++
 include/uapi/linux/bpf.h   | 2 ++
 kernel/bpf/syscall.c       | 4 ++++
 kernel/trace/bpf_trace.c   | 8 ++++++++
 5 files changed, 20 insertions(+)
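
Not part of the patch itself, but for illustration: a rough userspace sketch of how a
program of the new type could be loaded and attached through the generic cgroup attach
path that bpf_prog_attach() now routes to cgroup_bpf_prog_attach(). The two enum values
come from the hunks above; the cgroup path, the trivial "return 0" instruction stream,
and the helper names are placeholders. How the attached program gets bound to an actual
trace event is outside these hunks and presumably handled elsewhere in the series.

/* Hypothetical usage sketch -- not part of this patch.  Assumes the new
 * UAPI constants are present in the installed headers and that the caller
 * has the privileges is_perfmon_prog_type() now demands for this type
 * (CAP_BPF + CAP_PERFMON, or root).
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static __u64 ptr_to_u64(const void *p)
{
	return (__u64)(unsigned long)p;
}

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	struct bpf_insn insns[] = {
		/* r0 = 0; exit */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;
	int prog_fd, cg_fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_CGROUP_TRACEPOINT;	/* new in this patch */
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = ptr_to_u64("GPL");
	prog_fd = sys_bpf(BPF_PROG_LOAD, &attr);
	if (prog_fd < 0)
		return 1;

	cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);	/* hypothetical cgroup */
	if (cg_fd < 0)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cg_fd;				/* cgroup to attach to */
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_TRACEPOINT;	/* new in this patch */
	return sys_bpf(BPF_PROG_ATTACH, &attr) ? 1 : 0;
}

The same flow could presumably go through libbpf's bpf_prog_attach() wrapper once the
new enum values are known to it; the raw bpf(2) syscall is used here only to keep the
sketch self-contained.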