Context | Check | Description
bpf/vmtest-bpf-next-PR | fail | PR summary
bpf/vmtest-bpf-next-VM_Test-21 | success | Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 | success | Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 | success | Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 | success | Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-22 | fail | Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 | success | Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 | success | Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 | success | Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 | success | Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-31 | fail | Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 | fail | Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 | fail | Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 | fail | Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 | fail | Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 | success | Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-0 | success | Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 | success | Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 | success | Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 | success | Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 | success | Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 | success | Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 | success | Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 | success | Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 | success | Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-15 | success | Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 | success | Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 | success | Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 | fail | Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 | fail | Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 | success | Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 | success | Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 | success | Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-30 | success | Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-37 | success | Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-11 | success | Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 | success | Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 | success | Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 | success | Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 | success | Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-28 | success | Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 | success | Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 | success | Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 | success | Logs for x86_64-llvm-18 / veristat
@@ -72,6 +72,8 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
override_function_with_return(&(fregs)->regs)
#define ftrace_regs_query_register_offset(name) \
regs_query_register_offset(name)
+#define ftrace_regs_get_frame_pointer(fregs) \
+ frame_pointer(&(fregs)->regs)
struct ftrace_ops;
#define ftrace_graph_func ftrace_graph_func
@@ -43,7 +43,9 @@ struct dyn_ftrace;
char *arch_ftrace_match_adjust(char *str, const char *search);
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
+#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
struct fgraph_ret_regs;
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
#else
@@ -157,6 +159,7 @@ struct ftrace_regs {
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
if (!fregs)
@@ -1067,6 +1070,10 @@ typedef int (*trace_func_graph_regs_ent_t)(unsigned long func,
unsigned long parent_ip,
struct ftrace_regs *fregs,
struct fgraph_ops *); /* entry w/ regs */
+typedef void (*trace_func_graph_regs_ret_t)(unsigned long func,
+ unsigned long parent_ip,
+ struct ftrace_regs *,
+ struct fgraph_ops *); /* return w/ regs */
extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
@@ -1076,6 +1083,7 @@ struct fgraph_ops {
trace_func_graph_ent_t entryfunc;
trace_func_graph_ret_t retfunc;
trace_func_graph_regs_ent_t entryregfunc;
+ trace_func_graph_regs_ret_t retregfunc;
struct ftrace_ops ops; /* for the hash lists */
void *private;
int idx;
@@ -34,6 +34,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
config HAVE_FUNCTION_GRAPH_RETVAL
bool
+config HAVE_FUNCTION_GRAPH_FREGS
+ bool
+
config HAVE_DYNAMIC_FTRACE
bool
help
@@ -232,7 +235,7 @@ config FUNCTION_GRAPH_TRACER
config FUNCTION_GRAPH_RETVAL
bool "Kernel Function Graph Return Value"
- depends on HAVE_FUNCTION_GRAPH_RETVAL
+ depends on HAVE_FUNCTION_GRAPH_RETVAL || HAVE_FUNCTION_GRAPH_FREGS
depends on FUNCTION_GRAPH_TRACER
default n
help
@@ -685,8 +685,8 @@ int function_graph_enter_ops(unsigned long ret, unsigned long func,
/* Retrieve a function return address to the trace stack on thread info.*/
static struct ftrace_ret_stack *
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
- unsigned long frame_pointer, int *index)
+ftrace_pop_return_trace(unsigned long *ret, unsigned long frame_pointer,
+ int *index)
{
struct ftrace_ret_stack *ret_stack;
@@ -731,10 +731,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
*index += FGRAPH_RET_INDEX;
*ret = ret_stack->ret;
- trace->func = ret_stack->func;
- trace->calltime = ret_stack->calltime;
- trace->overrun = atomic_read(&current->trace_overrun);
- trace->depth = current->curr_ret_depth;
/*
* We still want to trace interrupts coming in if
* max_depth is set to 1. Make sure the decrement is
@@ -773,21 +769,42 @@ static struct notifier_block ftrace_suspend_notifier = {
/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
struct fgraph_ret_regs;
+static void fgraph_call_retfunc(struct ftrace_regs *fregs,
+ struct fgraph_ret_regs *ret_regs,
+ struct ftrace_ret_stack *ret_stack,
+ struct fgraph_ops *gops)
+{
+ struct ftrace_graph_ret trace;
+
+ trace.func = ret_stack->func;
+ trace.calltime = ret_stack->calltime;
+ trace.overrun = atomic_read(&current->trace_overrun);
+ trace.depth = current->curr_ret_depth;
+ trace.rettime = trace_clock_local();
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ if (fregs)
+ trace.retval = ftrace_regs_return_value(fregs);
+ else
+ trace.retval = fgraph_ret_regs_return_value(ret_regs);
+#endif
+ gops->retfunc(&trace, gops);
+}
+
/*
* Send the trace to the ring-buffer.
* @return the original return address.
*/
-static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
+static unsigned long __ftrace_return_to_handler(struct ftrace_regs *fregs,
+ struct fgraph_ret_regs *ret_regs,
unsigned long frame_pointer)
{
struct ftrace_ret_stack *ret_stack;
- struct ftrace_graph_ret trace;
unsigned long bitmap;
unsigned long ret;
int index;
int i;
- ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &index);
+ ret_stack = ftrace_pop_return_trace(&ret, frame_pointer, &index);
if (unlikely(!ret_stack)) {
ftrace_graph_stop();
@@ -796,10 +813,8 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
return (unsigned long)panic;
}
- trace.rettime = trace_clock_local();
-#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
- trace.retval = fgraph_ret_regs_return_value(ret_regs);
-#endif
+ if (fregs)
+ ftrace_regs_set_instruction_pointer(fregs, ret);
bitmap = get_fgraph_index_bitmap(current, index);
for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
@@ -810,7 +825,10 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
if (gops == &fgraph_stub)
continue;
- gops->retfunc(&trace, gops);
+ if (gops->retregfunc)
+ gops->retregfunc(ret_stack->func, ret, fregs, gops);
+ else
+ fgraph_call_retfunc(fregs, ret_regs, ret_stack, gops);
}
/*
@@ -825,20 +843,22 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
return ret;
}
-/*
- * After all architecures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
- * leave only ftrace_return_to_handler(ret_regs).
- */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
+{
+ return __ftrace_return_to_handler(fregs, NULL,
+ ftrace_regs_get_frame_pointer(fregs));
+}
+#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
{
- return __ftrace_return_to_handler(ret_regs,
+ return __ftrace_return_to_handler(NULL, ret_regs,
fgraph_ret_regs_frame_pointer(ret_regs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
- return __ftrace_return_to_handler(NULL, frame_pointer);
+ return __ftrace_return_to_handler(NULL, NULL, frame_pointer);
}
#endif
@@ -1191,9 +1211,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
int ret = 0;
int i;
- if (gops->entryfunc && gops->entryregfunc)
+ if ((gops->entryfunc && gops->entryregfunc) ||
+ (gops->retfunc && gops->retregfunc))
return -EINVAL;
+#ifndef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+ if (gops->retregfunc)
+ return -EOPNOTSUPP;
+#endif
+
mutex_lock(&ftrace_lock);
if (!gops->ops.func) {
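
For illustration only, not part of the patch above: a minimal consumer sketch of the new retregfunc hook, assuming an architecture that selects CONFIG_HAVE_FUNCTION_GRAPH_FREGS. The names sample_entry, sample_return and sample_gops are hypothetical; the callback signatures follow trace_func_graph_regs_ent_t and the trace_func_graph_regs_ret_t type added by this patch.

#include <linux/ftrace.h>

/* Hypothetical entry handler; matches trace_func_graph_regs_ent_t. */
static int sample_entry(unsigned long func, unsigned long parent_ip,
			struct ftrace_regs *fregs, struct fgraph_ops *gops)
{
	return 1;	/* nonzero: also hook this function's return */
}

/* Hypothetical return handler; matches the new trace_func_graph_regs_ret_t. */
static void sample_return(unsigned long func, unsigned long parent_ip,
			  struct ftrace_regs *fregs, struct fgraph_ops *gops)
{
	/* exit-time register state, e.g. ftrace_regs_return_value(fregs) */
}

static struct fgraph_ops sample_gops = {
	.entryregfunc	= sample_entry,
	.retregfunc	= sample_return,
	/*
	 * Per register_ftrace_graph() above: setting both retfunc and
	 * retregfunc returns -EINVAL, and retregfunc without
	 * CONFIG_HAVE_FUNCTION_GRAPH_FREGS returns -EOPNOTSUPP.
	 */
};

/* Registration, e.g. from module init:  err = register_ftrace_graph(&sample_gops); */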