[7/7] tracing: add arguments to function tracer

Message ID 20240904065908.1009086-8-svens@linux.ibm.com (mailing list archive)
State New
Delegated to: Masami Hiramatsu
Series: add function arguments to ftrace

Commit Message

Sven Schnelle Sept. 4, 2024, 6:59 a.m. UTC
Wire up the code to print function arguments in the function tracer.
This functionality can be enabled/disabled at compile time by
setting CONFIG_FUNCTION_TRACE_ARGS and at runtime with the
options/func-args tracer option.

        ping-689     [004] b....    77.170220: dummy_xmit(skb = 0x82904800, dev = 0x882d0000) <-dev_hard_start_xmit
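
A minimal user-space sketch of flipping the option at runtime; the tracefs
mount point and the use of the "function" tracer below are assumptions of
the example, not something this patch dictates:

	#include <stdio.h>

	/* write a value to a tracefs control file, e.g. options/func-args */
	static int write_tracefs(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* assumed tracefs mount point */
		write_tracefs("/sys/kernel/tracing/current_tracer", "function");
		return write_tracefs("/sys/kernel/tracing/options/func-args", "1");
	}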

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
---
 kernel/trace/trace.c              |  8 +++++-
 kernel/trace/trace.h              |  4 ++-
 kernel/trace/trace_entries.h      |  7 +++--
 kernel/trace/trace_functions.c    | 46 +++++++++++++++++++++++++++----
 kernel/trace/trace_irqsoff.c      |  4 +--
 kernel/trace/trace_output.c       | 14 ++++++++--
 kernel/trace/trace_sched_wakeup.c |  4 +--
 7 files changed, 71 insertions(+), 16 deletions(-)

Comments

Zheng Yejian Sept. 6, 2024, 3:49 a.m. UTC | #1
On 2024/9/4 14:59, Sven Schnelle wrote:
> Wire up the code to print function arguments in the function tracer.
> This functionality can be enabled/disabled at compile time by
> setting CONFIG_FUNCTION_TRACE_ARGS and at runtime with the
> options/func-args tracer option.
> 
>          ping-689     [004] b....    77.170220: dummy_xmit(skb = 0x82904800, dev = 0x882d0000) <-dev_hard_start_xmit
> 
> Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
> ---
>   kernel/trace/trace.c              |  8 +++++-
>   kernel/trace/trace.h              |  4 ++-
>   kernel/trace/trace_entries.h      |  7 +++--
>   kernel/trace/trace_functions.c    | 46 +++++++++++++++++++++++++++----
>   kernel/trace/trace_irqsoff.c      |  4 +--
>   kernel/trace/trace_output.c       | 14 ++++++++--
>   kernel/trace/trace_sched_wakeup.c |  4 +--
>   7 files changed, 71 insertions(+), 16 deletions(-)
> 
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index ebe7ce2f5f4a..8118e4c8c649 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -2864,7 +2864,7 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
>   
>   void
>   trace_function(struct trace_array *tr, unsigned long ip, unsigned long
> -	       parent_ip, unsigned int trace_ctx)
> +	       parent_ip, unsigned int trace_ctx, struct ftrace_regs *regs)
>   {
>   	struct trace_event_call *call = &event_function;
>   	struct trace_buffer *buffer = tr->array_buffer.buffer;
> @@ -2878,6 +2878,12 @@ trace_function(struct trace_array *tr, unsigned long ip, unsigned long
>   	entry	= ring_buffer_event_data(event);
>   	entry->ip			= ip;
>   	entry->parent_ip		= parent_ip;
> +#ifdef CONFIG_FUNCTION_TRACE_ARGS
> +	if (regs)
> +		entry->regs		= *regs;
> +	else
> +		memset(&entry->regs, 0, sizeof(struct ftrace_regs));
> +#endif
>   
>   	if (!call_filter_check_discard(call, entry, buffer, event)) {
>   		if (static_branch_unlikely(&trace_function_exports_enabled))
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index bd3e3069300e..13cf6f97f7e0 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -673,7 +673,8 @@ unsigned long trace_total_entries(struct trace_array *tr);
>   void trace_function(struct trace_array *tr,
>   		    unsigned long ip,
>   		    unsigned long parent_ip,
> -		    unsigned int trace_ctx);
> +		    unsigned int trace_ctx,
> +		    struct ftrace_regs *regs);
>   void trace_graph_function(struct trace_array *tr,
>   		    unsigned long ip,
>   		    unsigned long parent_ip,
> @@ -870,6 +871,7 @@ static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
>   #define TRACE_GRAPH_GRAPH_TIME          0x400
>   #define TRACE_GRAPH_PRINT_RETVAL        0x800
>   #define TRACE_GRAPH_PRINT_RETVAL_HEX    0x1000
> +#define TRACE_GRAPH_ARGS		0x2000
>   #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
>   #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
>   
> diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
> index c47422b20908..f2021ab52da2 100644
> --- a/kernel/trace/trace_entries.h
> +++ b/kernel/trace/trace_entries.h
> @@ -61,8 +61,11 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
>   	TRACE_FN,
>   
>   	F_STRUCT(
> -		__field_fn(	unsigned long,	ip		)
> -		__field_fn(	unsigned long,	parent_ip	)
> +		__field_fn(	unsigned long,		ip		)
> +		__field_fn(	unsigned long,		parent_ip	)
> +#ifdef CONFIG_FUNCTION_TRACE_ARGS
> +		__field_struct( struct ftrace_regs,	regs		)

Only the function arguments are printed, but there are several registers in
ftrace_regs; would it be better to store only what is needed?
Although different archs save function arguments in different registers, storing
the entire ftrace_regs is much simpler.
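
For illustration, a trimmed entry along those lines could look roughly like
the following; the name and the fixed argument count are assumptions, not
something taken from this series:

	struct ftrace_args_entry {
		unsigned long	ip;
		unsigned long	parent_ip;
		/* only the argument registers, not the whole ftrace_regs */
		unsigned long	args[6];
	};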

> +#endif
>   	),
>   
>   	F_printk(" %ps <-- %ps",

F_printk should also match F_STRUCT, otherwise the 'format' info may be incorrect,
which may confuse data parsing in user tools.

> diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
> index 3b0cea37e029..7ff651a0b45a 100644
> --- a/kernel/trace/trace_functions.c
> +++ b/kernel/trace/trace_functions.c
> @@ -25,6 +25,9 @@ static void
>   function_trace_call(unsigned long ip, unsigned long parent_ip,
>   		    struct ftrace_ops *op, struct ftrace_regs *fregs);
>   static void
> +function_args_trace_call(unsigned long ip, unsigned long parent_ip,
> +			 struct ftrace_ops *op, struct ftrace_regs *fregs);
> +static void
>   function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
>   			  struct ftrace_ops *op, struct ftrace_regs *fregs);
>   static void
> @@ -42,9 +45,10 @@ enum {
>   	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
>   	TRACE_FUNC_OPT_STACK		= 0x1,
>   	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
> +	TRACE_FUNC_OPT_ARGS		= 0x4,
>   
>   	/* Update this to next highest bit. */
> -	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
> +	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x8
>   };
>   
>   #define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
> @@ -114,6 +118,8 @@ static ftrace_func_t select_trace_function(u32 flags_val)
>   	switch (flags_val & TRACE_FUNC_OPT_MASK) {
>   	case TRACE_FUNC_NO_OPTS:
>   		return function_trace_call;
> +	case TRACE_FUNC_OPT_ARGS:
> +		return function_args_trace_call;
>   	case TRACE_FUNC_OPT_STACK:
>   		return function_stack_trace_call;
>   	case TRACE_FUNC_OPT_NO_REPEATS:
> @@ -198,7 +204,34 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
>   	cpu = smp_processor_id();
>   	data = per_cpu_ptr(tr->array_buffer.data, cpu);
>   	if (!atomic_read(&data->disabled))
> -		trace_function(tr, ip, parent_ip, trace_ctx);
> +		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
> +
> +	ftrace_test_recursion_unlock(bit);
> +}
> +
> +static void
> +function_args_trace_call(unsigned long ip, unsigned long parent_ip,
> +			 struct ftrace_ops *op, struct ftrace_regs *fregs)
> +{
> +	struct trace_array *tr = op->private;
> +	struct trace_array_cpu *data;
> +	unsigned int trace_ctx;
> +	int bit;
> +	int cpu;
> +
> +	if (unlikely(!tr->function_enabled))
> +		return;
> +
> +	bit = ftrace_test_recursion_trylock(ip, parent_ip);
> +	if (bit < 0)
> +		return;
> +
> +	trace_ctx = tracing_gen_ctx();
> +
> +	cpu = smp_processor_id();
> +	data = per_cpu_ptr(tr->array_buffer.data, cpu);
> +	if (!atomic_read(&data->disabled))
> +		trace_function(tr, ip, parent_ip, trace_ctx, fregs);
>   
>   	ftrace_test_recursion_unlock(bit);
>   }
> @@ -247,7 +280,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
>   
>   	if (likely(disabled == 1)) {
>   		trace_ctx = tracing_gen_ctx_flags(flags);
> -		trace_function(tr, ip, parent_ip, trace_ctx);
> +		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
>   #ifdef CONFIG_UNWINDER_FRAME_POINTER
>   		if (ftrace_pids_enabled(op))
>   			skip++;
> @@ -329,7 +362,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
>   	trace_ctx = tracing_gen_ctx_flags(flags);
>   	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
>   
> -	trace_function(tr, ip, parent_ip, trace_ctx);
> +	trace_function(tr, ip, parent_ip, trace_ctx, NULL);
>   
>   out:
>   	ftrace_test_recursion_unlock(bit);
> @@ -368,7 +401,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
>   		trace_ctx = tracing_gen_ctx_flags(flags);
>   		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
>   
> -		trace_function(tr, ip, parent_ip, trace_ctx);
> +		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
>   		__trace_stack(tr, trace_ctx, STACK_SKIP);
>   	}
>   
> @@ -382,6 +415,9 @@ static struct tracer_opt func_opts[] = {
>   	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
>   #endif
>   	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
> +#ifdef CONFIG_FUNCTION_TRACE_ARGS
> +	{ TRACER_OPT(func-args, TRACE_FUNC_OPT_ARGS) },
> +#endif
>   	{ } /* Always set a last empty entry */
>   };
>   
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index fce064e20570..096002c99d70 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -150,7 +150,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
>   
>   	trace_ctx = tracing_gen_ctx_flags(flags);
>   
> -	trace_function(tr, ip, parent_ip, trace_ctx);
> +	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
>   
>   	atomic_dec(&data->disabled);
>   }
> @@ -278,7 +278,7 @@ __trace_function(struct trace_array *tr,
>   	if (is_graph(tr))
>   		trace_graph_function(tr, ip, parent_ip, trace_ctx);
>   	else
> -		trace_function(tr, ip, parent_ip, trace_ctx);
> +		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
>   }
>   
>   #else
> diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
> index 70405c4cceb6..8fdee7b9cdba 100644
> --- a/kernel/trace/trace_output.c
> +++ b/kernel/trace/trace_output.c
> @@ -1058,9 +1058,13 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
>   }
>   
>   static void print_fn_trace(struct trace_seq *s, unsigned long ip,
> -			   unsigned long parent_ip, int flags)
> +			   unsigned long parent_ip,
> +			   struct ftrace_regs *fregs,
> +			   int flags)
>   {
>   	seq_print_ip_sym(s, ip, flags);
> +	if (fregs)
> +		print_function_args(s, fregs, ip);
>   
>   	if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
>   		trace_seq_puts(s, " <-");
> @@ -1074,10 +1078,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
>   {
>   	struct ftrace_entry *field;
>   	struct trace_seq *s = &iter->seq;
> +	struct ftrace_regs *fregs = NULL;
>   
>   	trace_assign_type(field, iter->ent);
>   
> -	print_fn_trace(s, field->ip, field->parent_ip, flags);
> +#if IS_ENABLED(CONFIG_FUNCTION_TRACE_ARGS)
> +	fregs = &field->regs;
> +#endif
> +	print_fn_trace(s, field->ip, field->parent_ip, fregs, flags);
>   	trace_seq_putc(s, '\n');
>   
>   	return trace_handle_return(s);
> @@ -1742,7 +1750,7 @@ trace_func_repeats_print(struct trace_iterator *iter, int flags,
>   
>   	trace_assign_type(field, iter->ent);
>   
> -	print_fn_trace(s, field->ip, field->parent_ip, flags);
> +	print_fn_trace(s, field->ip, field->parent_ip, NULL, flags);
>   	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
>   	trace_print_time(s, iter,
>   			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
> index 130ca7e7787e..39043c955761 100644
> --- a/kernel/trace/trace_sched_wakeup.c
> +++ b/kernel/trace/trace_sched_wakeup.c
> @@ -224,7 +224,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
>   		return;
>   
>   	local_irq_save(flags);
> -	trace_function(tr, ip, parent_ip, trace_ctx);
> +	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
>   	local_irq_restore(flags);
>   
>   	atomic_dec(&data->disabled);
> @@ -309,7 +309,7 @@ __trace_function(struct trace_array *tr,
>   	if (is_graph(tr))
>   		trace_graph_function(tr, ip, parent_ip, trace_ctx);
>   	else
> -		trace_function(tr, ip, parent_ip, trace_ctx);
> +		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
>   }
>   
>   static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)

Steven Rostedt Oct. 4, 2024, 10:43 p.m. UTC | #2
On Fri, 6 Sep 2024 11:49:10 +0800
Zheng Yejian <zhengyejian@huaweicloud.com> wrote:


> > diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
> > index c47422b20908..f2021ab52da2 100644
> > --- a/kernel/trace/trace_entries.h
> > +++ b/kernel/trace/trace_entries.h
> > @@ -61,8 +61,11 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
> >   	TRACE_FN,
> >   
> >   	F_STRUCT(
> > -		__field_fn(	unsigned long,	ip		)
> > -		__field_fn(	unsigned long,	parent_ip	)
> > +		__field_fn(	unsigned long,		ip		)
> > +		__field_fn(	unsigned long,		parent_ip	)
> > +#ifdef CONFIG_FUNCTION_TRACE_ARGS
> > +		__field_struct( struct ftrace_regs,	regs		)  
> 
> Only the function arguments are printed, but there are several registers in
> ftrace_regs; would it be better to store only what is needed?
> Although different archs save function arguments in different registers, storing
> the entire ftrace_regs is much simpler.

Agreed, and I stated as much in my reply to patch 6.

This too will need two versions of the event. One will be function, the
other will be function_args. And it will only record the necessary
arguments, not the full ftrace_regs structure, as I plan on making that
structure have "zero size".
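
A rough sketch of that direction, copying just the argument registers (the
helper name, the argument count and the destination layout are assumptions
about a possible implementation; ftrace_regs_get_argument() is only
available with CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS):

	#include <linux/ftrace.h>

	#define FTRACE_NR_ARGS	6

	/* hypothetical: copy only the argument registers out of fregs */
	static void copy_fn_args(unsigned long *args, struct ftrace_regs *fregs)
	{
		int i;

		for (i = 0; i < FTRACE_NR_ARGS; i++)
			args[i] = ftrace_regs_get_argument(fregs, i);
	}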

> 
> > +#endif
> >   	),
> >   
> >   	F_printk(" %ps <-- %ps",  
> 
> F_printk should also match F_STRUCT, otherwise the 'format' info may be incorrect,
> which may confuse data parsing in user tools.

Well, it will just ignore the struct part, as it's not listed.

-- Steve

> 
> > diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
> > index 3b0cea37e029..7ff651a0b45a 100644
> > --- a/kernel/trace/trace_functions.c
> > +++ b/kernel/trace/trace_functions.c
> > @@ -25,6 +25,9 @@ static void
> >   function_trace_call(unsigned long ip, unsigned long parent_ip,
> >   		    struct ftrace_ops *op, struct ftrace_regs *fregs);
> >   static void
> > +function_args_trace_call(unsigned long ip, unsigned long parent_ip,
> > +			 struct ftrace_ops *op, struct ftrace_regs *fregs);

Patch

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ebe7ce2f5f4a..8118e4c8c649 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2864,7 +2864,7 @@  trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 
 void
 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-	       parent_ip, unsigned int trace_ctx)
+	       parent_ip, unsigned int trace_ctx, struct ftrace_regs *regs)
 {
 	struct trace_event_call *call = &event_function;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2878,6 +2878,12 @@  trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 	entry	= ring_buffer_event_data(event);
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+	if (regs)
+		entry->regs		= *regs;
+	else
+		memset(&entry->regs, 0, sizeof(struct ftrace_regs));
+#endif
 
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		if (static_branch_unlikely(&trace_function_exports_enabled))
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index bd3e3069300e..13cf6f97f7e0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -673,7 +673,8 @@  unsigned long trace_total_entries(struct trace_array *tr);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned int trace_ctx);
+		    unsigned int trace_ctx,
+		    struct ftrace_regs *regs);
 void trace_graph_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -870,6 +871,7 @@  static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
 #define TRACE_GRAPH_GRAPH_TIME          0x400
 #define TRACE_GRAPH_PRINT_RETVAL        0x800
 #define TRACE_GRAPH_PRINT_RETVAL_HEX    0x1000
+#define TRACE_GRAPH_ARGS		0x2000
 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c47422b20908..f2021ab52da2 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -61,8 +61,11 @@  FTRACE_ENTRY_REG(function, ftrace_entry,
 	TRACE_FN,
 
 	F_STRUCT(
-		__field_fn(	unsigned long,	ip		)
-		__field_fn(	unsigned long,	parent_ip	)
+		__field_fn(	unsigned long,		ip		)
+		__field_fn(	unsigned long,		parent_ip	)
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+		__field_struct( struct ftrace_regs,	regs		)
+#endif
 	),
 
 	F_printk(" %ps <-- %ps",
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3b0cea37e029..7ff651a0b45a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -25,6 +25,9 @@  static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 static void
+function_args_trace_call(unsigned long ip, unsigned long parent_ip,
+			 struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 static void
@@ -42,9 +45,10 @@  enum {
 	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
 	TRACE_FUNC_OPT_STACK		= 0x1,
 	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
+	TRACE_FUNC_OPT_ARGS		= 0x4,
 
 	/* Update this to next highest bit. */
-	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
+	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x8
 };
 
 #define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
@@ -114,6 +118,8 @@  static ftrace_func_t select_trace_function(u32 flags_val)
 	switch (flags_val & TRACE_FUNC_OPT_MASK) {
 	case TRACE_FUNC_NO_OPTS:
 		return function_trace_call;
+	case TRACE_FUNC_OPT_ARGS:
+		return function_args_trace_call;
 	case TRACE_FUNC_OPT_STACK:
 		return function_stack_trace_call;
 	case TRACE_FUNC_OPT_NO_REPEATS:
@@ -198,7 +204,34 @@  function_trace_call(unsigned long ip, unsigned long parent_ip,
 	cpu = smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	if (!atomic_read(&data->disabled))
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
+
+	ftrace_test_recursion_unlock(bit);
+}
+
+static void
+function_args_trace_call(unsigned long ip, unsigned long parent_ip,
+			 struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+	struct trace_array *tr = op->private;
+	struct trace_array_cpu *data;
+	unsigned int trace_ctx;
+	int bit;
+	int cpu;
+
+	if (unlikely(!tr->function_enabled))
+		return;
+
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
+	trace_ctx = tracing_gen_ctx();
+
+	cpu = smp_processor_id();
+	data = per_cpu_ptr(tr->array_buffer.data, cpu);
+	if (!atomic_read(&data->disabled))
+		trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 
 	ftrace_test_recursion_unlock(bit);
 }
@@ -247,7 +280,7 @@  function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 
 	if (likely(disabled == 1)) {
 		trace_ctx = tracing_gen_ctx_flags(flags);
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 #ifdef CONFIG_UNWINDER_FRAME_POINTER
 		if (ftrace_pids_enabled(op))
 			skip++;
@@ -329,7 +362,7 @@  function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 	trace_ctx = tracing_gen_ctx_flags(flags);
 	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 
 out:
 	ftrace_test_recursion_unlock(bit);
@@ -368,7 +401,7 @@  function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 		trace_ctx = tracing_gen_ctx_flags(flags);
 		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 		__trace_stack(tr, trace_ctx, STACK_SKIP);
 	}
 
@@ -382,6 +415,9 @@  static struct tracer_opt func_opts[] = {
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
 	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
+#ifdef CONFIG_FUNCTION_TRACE_ARGS
+	{ TRACER_OPT(func-args, TRACE_FUNC_OPT_ARGS) },
+#endif
 	{ } /* Always set a last empty entry */
 };
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index fce064e20570..096002c99d70 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -150,7 +150,7 @@  irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 
 	trace_ctx = tracing_gen_ctx_flags(flags);
 
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 
 	atomic_dec(&data->disabled);
 }
@@ -278,7 +278,7 @@  __trace_function(struct trace_array *tr,
 	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, trace_ctx);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 }
 
 #else
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 70405c4cceb6..8fdee7b9cdba 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1058,9 +1058,13 @@  enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
 }
 
 static void print_fn_trace(struct trace_seq *s, unsigned long ip,
-			   unsigned long parent_ip, int flags)
+			   unsigned long parent_ip,
+			   struct ftrace_regs *fregs,
+			   int flags)
 {
 	seq_print_ip_sym(s, ip, flags);
+	if (fregs)
+		print_function_args(s, fregs, ip);
 
 	if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
 		trace_seq_puts(s, " <-");
@@ -1074,10 +1078,14 @@  static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
 {
 	struct ftrace_entry *field;
 	struct trace_seq *s = &iter->seq;
+	struct ftrace_regs *fregs = NULL;
 
 	trace_assign_type(field, iter->ent);
 
-	print_fn_trace(s, field->ip, field->parent_ip, flags);
+#if IS_ENABLED(CONFIG_FUNCTION_TRACE_ARGS)
+	fregs = &field->regs;
+#endif
+	print_fn_trace(s, field->ip, field->parent_ip, fregs, flags);
 	trace_seq_putc(s, '\n');
 
 	return trace_handle_return(s);
@@ -1742,7 +1750,7 @@  trace_func_repeats_print(struct trace_iterator *iter, int flags,
 
 	trace_assign_type(field, iter->ent);
 
-	print_fn_trace(s, field->ip, field->parent_ip, flags);
+	print_fn_trace(s, field->ip, field->parent_ip, NULL, flags);
 	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
 	trace_print_time(s, iter,
 			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 130ca7e7787e..39043c955761 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -224,7 +224,7 @@  wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 		return;
 
 	local_irq_save(flags);
-	trace_function(tr, ip, parent_ip, trace_ctx);
+	trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 	local_irq_restore(flags);
 
 	atomic_dec(&data->disabled);
@@ -309,7 +309,7 @@  __trace_function(struct trace_array *tr,
 	if (is_graph(tr))
 		trace_graph_function(tr, ip, parent_ip, trace_ctx);
 	else
-		trace_function(tr, ip, parent_ip, trace_ctx);
+		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 }
 
 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)