diff mbox series

[v7,23/36] function_graph: Add a new exit handler with parent_ip and ftrace_regs

Message ID 170723230476.502590.16817817024423790038.stgit@devnote2 (mailing list archive)
State Not Applicable
Headers show
Series tracing: fprobe: function_graph: Multi-function graph and fprobe on fgraph

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 fail Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 fail Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 fail Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 fail Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
netdev/series_format fail Series longer than 15 patches (and no cover letter)
netdev/tree_selection success Guessed tree name to be net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5645 this patch: 5645
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: mingo@redhat.com dave.hansen@linux.intel.com x86@kernel.org hpa@zytor.com mathieu.desnoyers@efficios.com bp@alien8.de
netdev/build_clang success Errors and warnings before: 1259 this patch: 1259
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 6361 this patch: 6361
netdev/checkpatch warning CHECK: Please don't use multiple blank lines; WARNING: line length of 81 exceeds 80 columns; WARNING: line length of 86 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 7 this patch: 7
netdev/source_inline success Was 0 now: 0

Commit Message

Masami Hiramatsu (Google) Feb. 6, 2024, 3:11 p.m. UTC
From: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Add a new return handler to fgraph_ops as 'retregfunc', which takes
parent_ip and ftrace_regs instead of ftrace_graph_ret. This handler
is available only if the arch supports CONFIG_HAVE_FUNCTION_GRAPH_FREGS.
Note that 'retfunc' and 'retregfunc' are mutually exclusive;
you can set only one of them.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
 Changes in v6:
  - update to use ftrace_regs_get_return_value() because of reordering
    patches.
 Changes in v3:
  - Update for new multiple fgraph.
  - Save the return address to instruction pointer in ftrace_regs.
---
 arch/x86/include/asm/ftrace.h |    2 +
 include/linux/ftrace.h        |   10 +++++-
 kernel/trace/Kconfig          |    5 ++-
 kernel/trace/fgraph.c         |   70 ++++++++++++++++++++++++++++-------------
 4 files changed, 63 insertions(+), 24 deletions(-)
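
For illustration, a minimal sketch of how a tracer might hook the new handler.
The handler and ops names below are made up; only the callback signature and the
mutual-exclusion rule come from this patch.

#include <linux/ftrace.h>

/* Hypothetical user of the new retregfunc interface (not part of this series). */
static void my_retreg_handler(unsigned long func, unsigned long parent_ip,
			      struct ftrace_regs *fregs, struct fgraph_ops *gops)
{
	/* With fregs available, the return value can be read directly. */
	unsigned long retval = ftrace_regs_get_return_value(fregs);

	pr_debug("%ps returned 0x%lx to %ps\n",
		 (void *)func, retval, (void *)parent_ip);
}

static struct fgraph_ops my_gops = {
	.retregfunc	= my_retreg_handler,
	/* .retfunc must not also be set: the two are mutually exclusive. */
};

On an arch without CONFIG_HAVE_FUNCTION_GRAPH_FREGS, register_ftrace_graph(&my_gops)
would fail with -EOPNOTSUPP, per the check added in this patch.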

Comments

Steven Rostedt Feb. 15, 2024, 3:39 p.m. UTC | #1
On Wed,  7 Feb 2024 00:11:44 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> 
> Add a new return handler to fgraph_ops as 'retregfunc', which takes
> parent_ip and ftrace_regs instead of ftrace_graph_ret. This handler
> is available only if the arch supports CONFIG_HAVE_FUNCTION_GRAPH_FREGS.
> Note that 'retfunc' and 'retregfunc' are mutually exclusive;
> you can set only one of them.
> 
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
>

> @@ -1076,6 +1083,7 @@ struct fgraph_ops {
>  	trace_func_graph_ent_t		entryfunc;
>  	trace_func_graph_ret_t		retfunc;
>  	trace_func_graph_regs_ent_t	entryregfunc;
> +	trace_func_graph_regs_ret_t	retregfunc;

Same for this:

struct fgraph_ops {
	union {
		trace_func_graph_ent_t		entryfunc;
		trace_func_graph_regs_ent_t	entryregfunc;
	};
	union {
		trace_func_graph_ret_t		retfunc;
		trace_func_graph_regs_ret_t	retregfunc;
	};

-- Steve


>  	struct ftrace_ops		ops; /* for the hash lists */
>  	void				*private;
>  	int				idx;
Steven Rostedt Feb. 15, 2024, 3:49 p.m. UTC | #2
On Wed,  7 Feb 2024 00:11:44 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index 61c541c36596..308b3bec01b1 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -34,6 +34,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
>  config HAVE_FUNCTION_GRAPH_RETVAL
>  	bool
>  
> +config HAVE_FUNCTION_GRAPH_FREGS
> +	bool
> +
>  config HAVE_DYNAMIC_FTRACE
>  	bool
>  	help

We're starting to get overloaded with the CONFIG_HAVE_* options.

We need to start consolidating them. I would like to make RETVAL and FREGS
into one option. We can add this now, but before we add anything else, we
need to see what HAVE configs have the same archs, and then just
consolidate them. If a new arch wants to add one of the consolidated
features, it will also need to add all the other features that were
consolidated with it.

-- Steve
Steven Rostedt Feb. 15, 2024, 4:04 p.m. UTC | #3
On Wed,  7 Feb 2024 00:11:44 +0900
"Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:

> diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
> index c88bf47f46da..a061f8832b20 100644
> --- a/arch/x86/include/asm/ftrace.h
> +++ b/arch/x86/include/asm/ftrace.h
> @@ -72,6 +72,8 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
>  	override_function_with_return(&(fregs)->regs)
>  #define ftrace_regs_query_register_offset(name) \
>  	regs_query_register_offset(name)
> +#define ftrace_regs_get_frame_pointer(fregs) \
> +	frame_pointer(&(fregs)->regs)
>  

Doesn't the above belong in the next patch that implements this for x86?

>  struct ftrace_ops;
>  #define ftrace_graph_func ftrace_graph_func
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 65d4d4b68768..da2a23f5a9ed 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -43,7 +43,9 @@ struct dyn_ftrace;
>  
>  char *arch_ftrace_match_adjust(char *str, const char *search);
>  
> -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
> +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> +unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
> +#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
>  struct fgraph_ret_regs;
>  unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
>  #else
> @@ -157,6 +159,7 @@ struct ftrace_regs {
>  #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
>  #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
>  
> +

spurious newline.

>  static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
>  {
>  	if (!fregs)
> @@ -1067,6 +1070,10 @@ typedef int (*trace_func_graph_regs_ent_t)(unsigned long func,
>  					   unsigned long parent_ip,
>  					   struct ftrace_regs *fregs,
>  					   struct fgraph_ops *); /* entry w/ regs */
> +typedef void (*trace_func_graph_regs_ret_t)(unsigned long func,
> +					    unsigned long parent_ip,
> +					    struct ftrace_regs *,
> +					    struct fgraph_ops *); /* return w/ regs */
>  
>  extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
>  
> @@ -1076,6 +1083,7 @@ struct fgraph_ops {
>  	trace_func_graph_ent_t		entryfunc;
>  	trace_func_graph_ret_t		retfunc;
>  	trace_func_graph_regs_ent_t	entryregfunc;
> +	trace_func_graph_regs_ret_t	retregfunc;
>  	struct ftrace_ops		ops; /* for the hash lists */
>  	void				*private;
>  	int				idx;
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index 61c541c36596..308b3bec01b1 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -34,6 +34,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
>  config HAVE_FUNCTION_GRAPH_RETVAL
>  	bool
>  
> +config HAVE_FUNCTION_GRAPH_FREGS
> +	bool
> +
>  config HAVE_DYNAMIC_FTRACE
>  	bool
>  	help
> @@ -232,7 +235,7 @@ config FUNCTION_GRAPH_TRACER
>  
>  config FUNCTION_GRAPH_RETVAL
>  	bool "Kernel Function Graph Return Value"
> -	depends on HAVE_FUNCTION_GRAPH_RETVAL
> +	depends on HAVE_FUNCTION_GRAPH_RETVAL || HAVE_FUNCTION_GRAPH_FREGS
>  	depends on FUNCTION_GRAPH_TRACER
>  	default n
>  	help
> diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> index 459912ca72e0..12e5f108e242 100644
> --- a/kernel/trace/fgraph.c
> +++ b/kernel/trace/fgraph.c
> @@ -752,8 +752,8 @@ int function_graph_enter_ops(unsigned long ret, unsigned long func,
>  
>  /* Retrieve a function return address to the trace stack on thread info.*/
>  static struct ftrace_ret_stack *
> -ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> -			unsigned long frame_pointer, int *index)
> +ftrace_pop_return_trace(unsigned long *ret, unsigned long frame_pointer,
> +			int *index)
>  {
>  	struct ftrace_ret_stack *ret_stack;
>  
> @@ -798,10 +798,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
>  
>  	*index += FGRAPH_RET_INDEX;
>  	*ret = ret_stack->ret;
> -	trace->func = ret_stack->func;
> -	trace->calltime = ret_stack->calltime;
> -	trace->overrun = atomic_read(&current->trace_overrun);
> -	trace->depth = current->curr_ret_depth;

There's a lot of information stored in the trace structure. Why not pass
that to the new retregfunc?

Then you don't need to separate this code out.

>  	/*
>  	 * We still want to trace interrupts coming in if
>  	 * max_depth is set to 1. Make sure the decrement is
> @@ -840,21 +836,42 @@ static struct notifier_block ftrace_suspend_notifier = {
>  /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
>  struct fgraph_ret_regs;
>  
> +static void fgraph_call_retfunc(struct ftrace_regs *fregs,
> +				struct fgraph_ret_regs *ret_regs,
> +				struct ftrace_ret_stack *ret_stack,
> +				struct fgraph_ops *gops)
> +{
> +	struct ftrace_graph_ret trace;
> +
> +	trace.func = ret_stack->func;
> +	trace.calltime = ret_stack->calltime;
> +	trace.overrun = atomic_read(&current->trace_overrun);
> +	trace.depth = current->curr_ret_depth;
> +	trace.rettime = trace_clock_local();
> +#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
> +	if (fregs)
> +		trace.retval = ftrace_regs_get_return_value(fregs);
> +	else
> +		trace.retval = fgraph_ret_regs_return_value(ret_regs);
> +#endif
> +	gops->retfunc(&trace, gops);
> +}
> +
>  /*
>   * Send the trace to the ring-buffer.
>   * @return the original return address.
>   */
> -static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
> +static unsigned long __ftrace_return_to_handler(struct ftrace_regs *fregs,
> +						struct fgraph_ret_regs *ret_regs,
>  						unsigned long frame_pointer)
>  {
>  	struct ftrace_ret_stack *ret_stack;
> -	struct ftrace_graph_ret trace;
>  	unsigned long bitmap;
>  	unsigned long ret;
>  	int index;
>  	int i;
>  
> -	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &index);
> +	ret_stack = ftrace_pop_return_trace(&ret, frame_pointer, &index);
>  
>  	if (unlikely(!ret_stack)) {
>  		ftrace_graph_stop();
> @@ -863,10 +880,8 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
>  		return (unsigned long)panic;
>  	}
>  
> -	trace.rettime = trace_clock_local();
> -#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
> -	trace.retval = fgraph_ret_regs_return_value(ret_regs);
> -#endif
> +	if (fregs)
> +		ftrace_regs_set_instruction_pointer(fregs, ret);
>  
>  	bitmap = get_fgraph_index_bitmap(current, index);
>  	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
> @@ -877,7 +892,10 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
>  		if (gops == &fgraph_stub)
>  			continue;
>  
> -		gops->retfunc(&trace, gops);
> +		if (gops->retregfunc)
> +			gops->retregfunc(ret_stack->func, ret, fregs, gops);
> +		else
> +			fgraph_call_retfunc(fregs, ret_regs, ret_stack, gops);
>  	}
>  
>  	/*
> @@ -892,20 +910,22 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
>  	return ret;
>  }
>  
> -/*
> - * After all architecures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
> - * leave only ftrace_return_to_handler(ret_regs).
> - */
> -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
> +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> +unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
> +{
> +	return __ftrace_return_to_handler(fregs, NULL,
> +				ftrace_regs_get_frame_pointer(fregs));
> +}
> +#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
>  unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
>  {
> -	return __ftrace_return_to_handler(ret_regs,
> +	return __ftrace_return_to_handler(NULL, ret_regs,
>  				fgraph_ret_regs_frame_pointer(ret_regs));
>  }
>  #else
>  unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
>  {
> -	return __ftrace_return_to_handler(NULL, frame_pointer);
> +	return __ftrace_return_to_handler(NULL, NULL, frame_pointer);
>  }
>  #endif
>  
> @@ -1262,9 +1282,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
>  	int ret = 0;
>  	int i;
>  
> -	if (gops->entryfunc && gops->entryregfunc)
> +	if ((gops->entryfunc && gops->entryregfunc) ||
> +	    (gops->retfunc && gops->retregfunc))
>  		return -EINVAL;

With a union, you don't need this.

Now, is it possible to have an entryregfunc and a retfunc, or an entryfunc
and a retregfunc?

-- Steve

>  
> +#ifndef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> +	if (gops->retregfunc)
> +		return -EOPNOTSUPP;
> +#endif
> +
>  	mutex_lock(&ftrace_lock);
>  
>  	if (!gops->ops.func) {
Masami Hiramatsu (Google) Feb. 16, 2024, 8:42 a.m. UTC | #4
On Thu, 15 Feb 2024 10:39:43 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Wed,  7 Feb 2024 00:11:44 +0900
> "Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:
> 
> > From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> > 
> > Add a new return handler to fgraph_ops as 'retregfunc', which takes
> > parent_ip and ftrace_regs instead of ftrace_graph_ret. This handler
> > is available only if the arch supports CONFIG_HAVE_FUNCTION_GRAPH_FREGS.
> > Note that 'retfunc' and 'retregfunc' are mutually exclusive;
> > you can set only one of them.
> > 
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> >
> 
> > @@ -1076,6 +1083,7 @@ struct fgraph_ops {
> >  	trace_func_graph_ent_t		entryfunc;
> >  	trace_func_graph_ret_t		retfunc;
> >  	trace_func_graph_regs_ent_t	entryregfunc;
> > +	trace_func_graph_regs_ret_t	retregfunc;
> 
> Same for this:
> 
> struct fgraph_ops {
> 	union {
> 		trace_func_graph_ent_t		entryfunc;
> 		trace_func_graph_regs_ent_t	entryregfunc;
> 	};
> 	union {
> 		trace_func_graph_ret_t		retfunc;
> 		trace_func_graph_regs_ret_t	retregfunc;
> 	};

OK, and we need to introduce a flag in fgraph_ops to indicate whether it is
using the `regfunc` variant or not.

Thank you,

> 
> -- Steve
> 
> 
> >  	struct ftrace_ops		ops; /* for the hash lists */
> >  	void				*private;
> >  	int				idx;
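
For reference, a rough sketch of the union-based layout suggested above, combined
with the flag Masami mentions for telling the two variants apart. The flags field
and the FGRAPH_OPS_FL_REGS name are made up; nothing here is from the posted patch.

#define FGRAPH_OPS_FL_REGS	BIT(0)	/* hypothetical: ops uses the *regfunc variants */

struct fgraph_ops {
	union {
		trace_func_graph_ent_t		entryfunc;
		trace_func_graph_regs_ent_t	entryregfunc;
	};
	union {
		trace_func_graph_ret_t		retfunc;
		trace_func_graph_regs_ret_t	retregfunc;
	};
	unsigned long			flags;	/* e.g. FGRAPH_OPS_FL_REGS */
	struct ftrace_ops		ops;	/* for the hash lists */
	void				*private;
	int				idx;
};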
Masami Hiramatsu (Google) Feb. 16, 2024, 8:43 a.m. UTC | #5
On Thu, 15 Feb 2024 10:49:03 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Wed,  7 Feb 2024 00:11:44 +0900
> "Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:
> 
> > diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> > index 61c541c36596..308b3bec01b1 100644
> > --- a/kernel/trace/Kconfig
> > +++ b/kernel/trace/Kconfig
> > @@ -34,6 +34,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
> >  config HAVE_FUNCTION_GRAPH_RETVAL
> >  	bool
> >  
> > +config HAVE_FUNCTION_GRAPH_FREGS
> > +	bool
> > +
> >  config HAVE_DYNAMIC_FTRACE
> >  	bool
> >  	help
> 
> We're starting to get overloaded with the CONFIG_HAVE_* options.
> 
> We need to start consolidating them. I would like to make RETVAL and FREGS
> into one option. We can add this now, but before we add anything else, we
> need to see what HAVE configs have the same archs, and then just
> consolidate them. If a new arch wants to add one of the consolidated
> features, it will also need to add all the other features that were
> consolidated with it.

Got it. So RETVAL should be implemented by FREGS or REGS.
Thank you,

> 
> -- Steve
Masami Hiramatsu (Google) Feb. 16, 2024, 8:51 a.m. UTC | #6
On Thu, 15 Feb 2024 11:04:04 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> On Wed,  7 Feb 2024 00:11:44 +0900
> "Masami Hiramatsu (Google)" <mhiramat@kernel.org> wrote:
> 
> > diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
> > index c88bf47f46da..a061f8832b20 100644
> > --- a/arch/x86/include/asm/ftrace.h
> > +++ b/arch/x86/include/asm/ftrace.h
> > @@ -72,6 +72,8 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
> >  	override_function_with_return(&(fregs)->regs)
> >  #define ftrace_regs_query_register_offset(name) \
> >  	regs_query_register_offset(name)
> > +#define ftrace_regs_get_frame_pointer(fregs) \
> > +	frame_pointer(&(fregs)->regs)
> >  
> 
> Doesn't the above belong in the next patch that implements this for x86?

Yes, thanks for pointing it out!

> 
> >  struct ftrace_ops;
> >  #define ftrace_graph_func ftrace_graph_func
> > diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> > index 65d4d4b68768..da2a23f5a9ed 100644
> > --- a/include/linux/ftrace.h
> > +++ b/include/linux/ftrace.h
> > @@ -43,7 +43,9 @@ struct dyn_ftrace;
> >  
> >  char *arch_ftrace_match_adjust(char *str, const char *search);
> >  
> > -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
> > +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> > +unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
> > +#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
> >  struct fgraph_ret_regs;
> >  unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
> >  #else
> > @@ -157,6 +159,7 @@ struct ftrace_regs {
> >  #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
> >  #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
> >  
> > +
> 
> spurious newline.

OK, I'll remove it.

> 
> >  static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
> >  {
> >  	if (!fregs)
> > @@ -1067,6 +1070,10 @@ typedef int (*trace_func_graph_regs_ent_t)(unsigned long func,
> >  					   unsigned long parent_ip,
> >  					   struct ftrace_regs *fregs,
> >  					   struct fgraph_ops *); /* entry w/ regs */
> > +typedef void (*trace_func_graph_regs_ret_t)(unsigned long func,
> > +					    unsigned long parent_ip,
> > +					    struct ftrace_regs *,
> > +					    struct fgraph_ops *); /* return w/ regs */
> >  
> >  extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
> >  
> > @@ -1076,6 +1083,7 @@ struct fgraph_ops {
> >  	trace_func_graph_ent_t		entryfunc;
> >  	trace_func_graph_ret_t		retfunc;
> >  	trace_func_graph_regs_ent_t	entryregfunc;
> > +	trace_func_graph_regs_ret_t	retregfunc;
> >  	struct ftrace_ops		ops; /* for the hash lists */
> >  	void				*private;
> >  	int				idx;
> > diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> > index 61c541c36596..308b3bec01b1 100644
> > --- a/kernel/trace/Kconfig
> > +++ b/kernel/trace/Kconfig
> > @@ -34,6 +34,9 @@ config HAVE_FUNCTION_GRAPH_TRACER
> >  config HAVE_FUNCTION_GRAPH_RETVAL
> >  	bool
> >  
> > +config HAVE_FUNCTION_GRAPH_FREGS
> > +	bool
> > +
> >  config HAVE_DYNAMIC_FTRACE
> >  	bool
> >  	help
> > @@ -232,7 +235,7 @@ config FUNCTION_GRAPH_TRACER
> >  
> >  config FUNCTION_GRAPH_RETVAL
> >  	bool "Kernel Function Graph Return Value"
> > -	depends on HAVE_FUNCTION_GRAPH_RETVAL
> > +	depends on HAVE_FUNCTION_GRAPH_RETVAL || HAVE_FUNCTION_GRAPH_FREGS
> >  	depends on FUNCTION_GRAPH_TRACER
> >  	default n
> >  	help
> > diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> > index 459912ca72e0..12e5f108e242 100644
> > --- a/kernel/trace/fgraph.c
> > +++ b/kernel/trace/fgraph.c
> > @@ -752,8 +752,8 @@ int function_graph_enter_ops(unsigned long ret, unsigned long func,
> >  
> >  /* Retrieve a function return address to the trace stack on thread info.*/
> >  static struct ftrace_ret_stack *
> > -ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> > -			unsigned long frame_pointer, int *index)
> > +ftrace_pop_return_trace(unsigned long *ret, unsigned long frame_pointer,
> > +			int *index)
> >  {
> >  	struct ftrace_ret_stack *ret_stack;
> >  
> > @@ -798,10 +798,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> >  
> >  	*index += FGRAPH_RET_INDEX;
> >  	*ret = ret_stack->ret;
> > -	trace->func = ret_stack->func;
> > -	trace->calltime = ret_stack->calltime;
> > -	trace->overrun = atomic_read(&current->trace_overrun);
> > -	trace->depth = current->curr_ret_depth;
> 
> There's a lot of information stored in the trace structure. Why not pass
> that to the new retregfunc?
> 
> Then you don't need to separate this code out.

Sorry, I couldn't catch what you meant. Do you mean to call
ftrace_pop_return_trace() before calling retregfunc()? Because some of the
information comes from ret_stack, which is popped from the shadow stack.

> 
> >  	/*
> >  	 * We still want to trace interrupts coming in if
> >  	 * max_depth is set to 1. Make sure the decrement is
> > @@ -840,21 +836,42 @@ static struct notifier_block ftrace_suspend_notifier = {
> >  /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
> >  struct fgraph_ret_regs;
> >  
> > +static void fgraph_call_retfunc(struct ftrace_regs *fregs,
> > +				struct fgraph_ret_regs *ret_regs,
> > +				struct ftrace_ret_stack *ret_stack,
> > +				struct fgraph_ops *gops)
> > +{
> > +	struct ftrace_graph_ret trace;
> > +
> > +	trace.func = ret_stack->func;
> > +	trace.calltime = ret_stack->calltime;
> > +	trace.overrun = atomic_read(&current->trace_overrun);
> > +	trace.depth = current->curr_ret_depth;
> > +	trace.rettime = trace_clock_local();
> > +#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
> > +	if (fregs)
> > +		trace.retval = ftrace_regs_get_return_value(fregs);
> > +	else
> > +		trace.retval = fgraph_ret_regs_return_value(ret_regs);
> > +#endif
> > +	gops->retfunc(&trace, gops);
> > +}
> > +
> >  /*
> >   * Send the trace to the ring-buffer.
> >   * @return the original return address.
> >   */
> > -static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
> > +static unsigned long __ftrace_return_to_handler(struct ftrace_regs *fregs,
> > +						struct fgraph_ret_regs *ret_regs,
> >  						unsigned long frame_pointer)
> >  {
> >  	struct ftrace_ret_stack *ret_stack;
> > -	struct ftrace_graph_ret trace;
> >  	unsigned long bitmap;
> >  	unsigned long ret;
> >  	int index;
> >  	int i;
> >  
> > -	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &index);
> > +	ret_stack = ftrace_pop_return_trace(&ret, frame_pointer, &index);
> >  
> >  	if (unlikely(!ret_stack)) {
> >  		ftrace_graph_stop();
> > @@ -863,10 +880,8 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
> >  		return (unsigned long)panic;
> >  	}
> >  
> > -	trace.rettime = trace_clock_local();
> > -#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
> > -	trace.retval = fgraph_ret_regs_return_value(ret_regs);
> > -#endif
> > +	if (fregs)
> > +		ftrace_regs_set_instruction_pointer(fregs, ret);
> >  
> >  	bitmap = get_fgraph_index_bitmap(current, index);
> >  	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
> > @@ -877,7 +892,10 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
> >  		if (gops == &fgraph_stub)
> >  			continue;
> >  
> > -		gops->retfunc(&trace, gops);
> > +		if (gops->retregfunc)
> > +			gops->retregfunc(ret_stack->func, ret, fregs, gops);
> > +		else
> > +			fgraph_call_retfunc(fregs, ret_regs, ret_stack, gops);
> >  	}
> >  
> >  	/*
> > @@ -892,20 +910,22 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
> >  	return ret;
> >  }
> >  
> > -/*
> > - * After all architecures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
> > - * leave only ftrace_return_to_handler(ret_regs).
> > - */
> > -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
> > +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> > +unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
> > +{
> > +	return __ftrace_return_to_handler(fregs, NULL,
> > +				ftrace_regs_get_frame_pointer(fregs));
> > +}
> > +#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
> >  unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
> >  {
> > -	return __ftrace_return_to_handler(ret_regs,
> > +	return __ftrace_return_to_handler(NULL, ret_regs,
> >  				fgraph_ret_regs_frame_pointer(ret_regs));
> >  }
> >  #else
> >  unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
> >  {
> > -	return __ftrace_return_to_handler(NULL, frame_pointer);
> > +	return __ftrace_return_to_handler(NULL, NULL, frame_pointer);
> >  }
> >  #endif
> >  
> > @@ -1262,9 +1282,15 @@ int register_ftrace_graph(struct fgraph_ops *gops)
> >  	int ret = 0;
> >  	int i;
> >  
> > -	if (gops->entryfunc && gops->entryregfunc)
> > +	if ((gops->entryfunc && gops->entryregfunc) ||
> > +	    (gops->retfunc && gops->retregfunc))
> >  		return -EINVAL;
> 
> With a union, you don't need this.

Indeed.

> 
> Now, is it possible to have an entryregfunc and a retfunc, or an entryfunc
> and a retregfunc?

It is possible, but it may not give any benefit.

Thank you,

> 
> -- Steve
> 
> >  
> > +#ifndef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
> > +	if (gops->retregfunc)
> > +		return -EOPNOTSUPP;
> > +#endif
> > +
> >  	mutex_lock(&ftrace_lock);
> >  
> >  	if (!gops->ops.func) {
>
Masami Hiramatsu (Google) Feb. 18, 2024, 2:53 a.m. UTC | #7
On Fri, 16 Feb 2024 17:51:08 +0900
Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:

> > > @@ -798,10 +798,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> > >  
> > >  	*index += FGRAPH_RET_INDEX;
> > >  	*ret = ret_stack->ret;
> > > -	trace->func = ret_stack->func;
> > > -	trace->calltime = ret_stack->calltime;
> > > -	trace->overrun = atomic_read(&current->trace_overrun);
> > > -	trace->depth = current->curr_ret_depth;
> > 
> > There's a lot of information stored in the trace structure. Why not pass
> > that to the new retregfunc?
> > 
> > Then you don't need to separate this code out.
> 
> Sorry, I couldn't catch what you meant. Do you mean to call
> ftrace_pop_return_trace() before calling retregfunc()? Because some of the
> information comes from ret_stack, which is popped from the shadow stack.

Ah, sorry, now I get what you said. I think this `trace` is not usable for the new
interface. Most of the information is only used by the function-graph tracer.
For example, trace->calltime, trace->overrun, and trace->depth are used only
by the function-graph tracer, not by the other tracers.

But yeah, this idea is worth considering. It would also allow us to just update
entryfunc() and retfunc() to pass ftrace_regs and the return address.

Thank you!
Masami Hiramatsu (Google) Feb. 19, 2024, 2:04 p.m. UTC | #8
Hi Steve,

On Sun, 18 Feb 2024 11:53:28 +0900
Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:

> On Fri, 16 Feb 2024 17:51:08 +0900
> Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:
> 
> > > > @@ -798,10 +798,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> > > >  
> > > >  	*index += FGRAPH_RET_INDEX;
> > > >  	*ret = ret_stack->ret;
> > > > -	trace->func = ret_stack->func;
> > > > -	trace->calltime = ret_stack->calltime;
> > > > -	trace->overrun = atomic_read(&current->trace_overrun);
> > > > -	trace->depth = current->curr_ret_depth;
> > > 
> > > There's a lot of information stored in the trace structure. Why not pass
> > > that to the new retregfunc?
> > > 
> > > Then you don't need to separate this code out.
> > 
> > Sorry, I couldn't catch what you meant. Do you mean to call
> > ftrace_pop_return_trace() before calling retregfunc()? Because some of the
> > information comes from ret_stack, which is popped from the shadow stack.
> 
> Ah, sorry, now I get what you said. I think this `trace` is not usable for the new
> interface. Most of the information is only used by the function-graph tracer.
> For example, trace->calltime, trace->overrun, and trace->depth are used only
> by the function-graph tracer, not by the other tracers.
> 
> But yeah, this idea is worth considering. It would also allow us to just update
> entryfunc() and retfunc() to pass ftrace_regs and the return address.

The reason why I didn't reuse those for *regfunc() is not only that they carry
unused information, but also that they are missing some parameters.

 - ftrace_graph_ent only has the current `func`, but entryregfunc()
    needs `parent_ip` (== return address)
 - ftrace_graph_ret only has the current `func`, but retregfunc()
    needs `ret` (== return address) too.

If I update ftrace_graph_ent/ret to add 'retaddr', we can just pass
ftrace_graph_ent/ret, ftrace_regs, and fgraph_ops to *regfunc().
Moreover, maybe we don't need *regfunc at all, and can just update
entryfunc/retfunc to pass a ftrace_regs *, which will be NULL if it is not
supported.

Thank you,
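
For reference, a hypothetical sketch of the direction suggested above: extend
ftrace_graph_ent/ret with the return address and pass ftrace_regs (NULL when the
arch cannot supply it) to a single pair of callbacks. None of this is in the
posted patch; the retaddr field and the extra fregs parameter are illustrative.

struct ftrace_graph_ret {
	unsigned long func;	/* current function */
	unsigned long retaddr;	/* return address (new, illustrative) */
	/* ... existing fields: depth, overrun, calltime, rettime, retval ... */
};

/* One return callback for everyone; fregs may be NULL without HAVE_FUNCTION_GRAPH_FREGS. */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *trace,
					struct fgraph_ops *gops,
					struct ftrace_regs *fregs);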

Patch

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c88bf47f46da..a061f8832b20 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -72,6 +72,8 @@  arch_ftrace_get_regs(struct ftrace_regs *fregs)
 	override_function_with_return(&(fregs)->regs)
 #define ftrace_regs_query_register_offset(name) \
 	regs_query_register_offset(name)
+#define ftrace_regs_get_frame_pointer(fregs) \
+	frame_pointer(&(fregs)->regs)
 
 struct ftrace_ops;
 #define ftrace_graph_func ftrace_graph_func
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 65d4d4b68768..da2a23f5a9ed 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -43,7 +43,9 @@  struct dyn_ftrace;
 
 char *arch_ftrace_match_adjust(char *str, const char *search);
 
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
+#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
 struct fgraph_ret_regs;
 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
 #else
@@ -157,6 +159,7 @@  struct ftrace_regs {
 #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
 
+
 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
 {
 	if (!fregs)
@@ -1067,6 +1070,10 @@  typedef int (*trace_func_graph_regs_ent_t)(unsigned long func,
 					   unsigned long parent_ip,
 					   struct ftrace_regs *fregs,
 					   struct fgraph_ops *); /* entry w/ regs */
+typedef void (*trace_func_graph_regs_ret_t)(unsigned long func,
+					    unsigned long parent_ip,
+					    struct ftrace_regs *,
+					    struct fgraph_ops *); /* return w/ regs */
 
 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
 
@@ -1076,6 +1083,7 @@  struct fgraph_ops {
 	trace_func_graph_ent_t		entryfunc;
 	trace_func_graph_ret_t		retfunc;
 	trace_func_graph_regs_ent_t	entryregfunc;
+	trace_func_graph_regs_ret_t	retregfunc;
 	struct ftrace_ops		ops; /* for the hash lists */
 	void				*private;
 	int				idx;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61c541c36596..308b3bec01b1 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -34,6 +34,9 @@  config HAVE_FUNCTION_GRAPH_TRACER
 config HAVE_FUNCTION_GRAPH_RETVAL
 	bool
 
+config HAVE_FUNCTION_GRAPH_FREGS
+	bool
+
 config HAVE_DYNAMIC_FTRACE
 	bool
 	help
@@ -232,7 +235,7 @@  config FUNCTION_GRAPH_TRACER
 
 config FUNCTION_GRAPH_RETVAL
 	bool "Kernel Function Graph Return Value"
-	depends on HAVE_FUNCTION_GRAPH_RETVAL
+	depends on HAVE_FUNCTION_GRAPH_RETVAL || HAVE_FUNCTION_GRAPH_FREGS
 	depends on FUNCTION_GRAPH_TRACER
 	default n
 	help
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 459912ca72e0..12e5f108e242 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -752,8 +752,8 @@  int function_graph_enter_ops(unsigned long ret, unsigned long func,
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static struct ftrace_ret_stack *
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
-			unsigned long frame_pointer, int *index)
+ftrace_pop_return_trace(unsigned long *ret, unsigned long frame_pointer,
+			int *index)
 {
 	struct ftrace_ret_stack *ret_stack;
 
@@ -798,10 +798,6 @@  ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
 	*index += FGRAPH_RET_INDEX;
 	*ret = ret_stack->ret;
-	trace->func = ret_stack->func;
-	trace->calltime = ret_stack->calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = current->curr_ret_depth;
 	/*
 	 * We still want to trace interrupts coming in if
 	 * max_depth is set to 1. Make sure the decrement is
@@ -840,21 +836,42 @@  static struct notifier_block ftrace_suspend_notifier = {
 /* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
 struct fgraph_ret_regs;
 
+static void fgraph_call_retfunc(struct ftrace_regs *fregs,
+				struct fgraph_ret_regs *ret_regs,
+				struct ftrace_ret_stack *ret_stack,
+				struct fgraph_ops *gops)
+{
+	struct ftrace_graph_ret trace;
+
+	trace.func = ret_stack->func;
+	trace.calltime = ret_stack->calltime;
+	trace.overrun = atomic_read(&current->trace_overrun);
+	trace.depth = current->curr_ret_depth;
+	trace.rettime = trace_clock_local();
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+	if (fregs)
+		trace.retval = ftrace_regs_get_return_value(fregs);
+	else
+		trace.retval = fgraph_ret_regs_return_value(ret_regs);
+#endif
+	gops->retfunc(&trace, gops);
+}
+
 /*
  * Send the trace to the ring-buffer.
  * @return the original return address.
  */
-static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
+static unsigned long __ftrace_return_to_handler(struct ftrace_regs *fregs,
+						struct fgraph_ret_regs *ret_regs,
 						unsigned long frame_pointer)
 {
 	struct ftrace_ret_stack *ret_stack;
-	struct ftrace_graph_ret trace;
 	unsigned long bitmap;
 	unsigned long ret;
 	int index;
 	int i;
 
-	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &index);
+	ret_stack = ftrace_pop_return_trace(&ret, frame_pointer, &index);
 
 	if (unlikely(!ret_stack)) {
 		ftrace_graph_stop();
@@ -863,10 +880,8 @@  static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
 		return (unsigned long)panic;
 	}
 
-	trace.rettime = trace_clock_local();
-#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
-	trace.retval = fgraph_ret_regs_return_value(ret_regs);
-#endif
+	if (fregs)
+		ftrace_regs_set_instruction_pointer(fregs, ret);
 
 	bitmap = get_fgraph_index_bitmap(current, index);
 	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
@@ -877,7 +892,10 @@  static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
 		if (gops == &fgraph_stub)
 			continue;
 
-		gops->retfunc(&trace, gops);
+		if (gops->retregfunc)
+			gops->retregfunc(ret_stack->func, ret, fregs, gops);
+		else
+			fgraph_call_retfunc(fregs, ret_regs, ret_stack, gops);
 	}
 
 	/*
@@ -892,20 +910,22 @@  static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
 	return ret;
 }
 
-/*
- * After all architecures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
- * leave only ftrace_return_to_handler(ret_regs).
- */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
+{
+	return __ftrace_return_to_handler(fregs, NULL,
+				ftrace_regs_get_frame_pointer(fregs));
+}
+#elif defined(CONFIG_HAVE_FUNCTION_GRAPH_RETVAL)
 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
 {
-	return __ftrace_return_to_handler(ret_regs,
+	return __ftrace_return_to_handler(NULL, ret_regs,
 				fgraph_ret_regs_frame_pointer(ret_regs));
 }
 #else
 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
-	return __ftrace_return_to_handler(NULL, frame_pointer);
+	return __ftrace_return_to_handler(NULL, NULL, frame_pointer);
 }
 #endif
 
@@ -1262,9 +1282,15 @@  int register_ftrace_graph(struct fgraph_ops *gops)
 	int ret = 0;
 	int i;
 
-	if (gops->entryfunc && gops->entryregfunc)
+	if ((gops->entryfunc && gops->entryregfunc) ||
+	    (gops->retfunc && gops->retregfunc))
 		return -EINVAL;
 
+#ifndef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+	if (gops->retregfunc)
+		return -EOPNOTSUPP;
+#endif
+
 	mutex_lock(&ftrace_lock);
 
 	if (!gops->ops.func) {