@@ -117,6 +117,16 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+/*
+ * bpf_probe_read() lets a program read arbitrary kernel memory, so the
+ * helper must not be handed out when the kernel is locked down.  Returning
+ * a NULL proto makes the verifier reject any program that asks for it.
+ */
+static const struct bpf_func_proto *bpf_get_probe_read_proto(void)
+{
+	return kernel_is_locked_down("BPF") ? NULL : &bpf_probe_read_proto;
+}
+
 BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
 	   u32, size)
 {
@@ -282,6 +292,9 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 {
+	if (kernel_is_locked_down("BPF"))
+		return NULL;
+
 	/*
 	 * this program might be calling bpf_trace_printk,
 	 * so allocate per-cpu printk buffers
@@ -535,7 +548,7 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_map_delete_elem:
 		return &bpf_map_delete_elem_proto;
 	case BPF_FUNC_probe_read:
-		return &bpf_probe_read_proto;
+		return bpf_get_probe_read_proto();
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_tail_call: