@@ -1417,6 +1417,7 @@ struct task_struct {
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
+ unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
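Note on the new field: ftrace_sleeptime is a per-task running total of time spent sleeping that only ever grows; nothing resets it, so consumers snapshot it on function entry and work with deltas on return. A minimal sketch of that running-counter idiom (plain C with illustrative names and numbers, not kernel code):

/* Running-counter idiom behind ftrace_sleeptime: the accumulator only
 * grows, and each consumer computes a delta against its own snapshot.
 */
#include <stdio.h>

struct task {
	unsigned long long sleeptime;	/* total ns asleep, never reset */
};

int main(void)
{
	struct task t = { .sleeptime = 0 };
	unsigned long long snap = t.sleeptime;	/* snapshot at function entry */

	t.sleeptime += 300;	/* scheduler accounts one sleep... */
	t.sleeptime += 200;	/* ...and another */

	/* at function return: time this frame spent asleep */
	printf("slept %llu ns\n", t.sleeptime - snap);	/* 500 */
	return 0;
}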
@@ -495,7 +495,7 @@ void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
}
/* Both enabled by default (can be cleared by function_graph tracer flags) */
-static bool fgraph_sleep_time = true;
+bool fgraph_sleep_time = true;
#ifdef CONFIG_DYNAMIC_FTRACE
/*
@@ -1046,9 +1046,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
struct task_struct *next,
unsigned int prev_state)
{
- struct ftrace_ret_stack *ret_stack;
unsigned long long timestamp;
- int offset;
/*
* Does the user want to count the time a function was asleep?
@@ -1065,17 +1063,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
if (!next->ftrace_timestamp)
return;
- /*
- * Update all the counters in next to make up for the
- * time next was sleeping.
- */
- timestamp -= next->ftrace_timestamp;
-
- for (offset = next->curr_ret_stack; offset > 0; ) {
- ret_stack = get_ret_stack(next, offset, &offset);
- if (ret_stack)
- ret_stack->calltime += timestamp;
- }
+ next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}
static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
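This is the heart of the change: the old probe walked every shadow-stack frame on each sched-in and moved its calltime forward, an O(depth) pass in the scheduler path; the new probe does a single addition, and the sleep delta is subtracted later, where the duration is actually computed. A quick equivalence check with made-up numbers (plain C, not kernel code):

/* Equivalence of the removed per-frame fix-up and the new delta scheme. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long calltime = 100, rettime = 1000, slept = 500;

	/* Old scheme: sched-in moved every frame's calltime forward by
	 * the time slept, so rettime - calltime excluded sleep.
	 */
	unsigned long long old_duration = rettime - (calltime + slept);

	/* New scheme: one per-task accumulator; the frame snapshots it
	 * at entry and subtracts the delta at return.
	 */
	unsigned long long sleeptime = 2000;	/* accumulator at entry */
	unsigned long long snap = sleeptime;
	sleeptime += slept;			/* one sleep inside the frame */
	unsigned long long new_duration =
		(rettime - calltime) - (sleeptime - snap);

	assert(old_duration == new_duration);
	printf("duration = %llu ns\n", new_duration);	/* 400 */
	return 0;
}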
@@ -820,10 +820,15 @@ void ftrace_graph_graph_time_control(bool enable)
fgraph_graph_time = enable;
}
+struct profile_fgraph_data {
+ unsigned long long subtime;
+ unsigned long long sleeptime;
+};
+
static int profile_graph_entry(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops)
{
- unsigned long long *subtime;
+ struct profile_fgraph_data *profile_data;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -831,9 +836,12 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
if (!current->ret_stack)
return 0;
- subtime = fgraph_reserve_data(gops->idx, sizeof(*subtime));
- if (subtime)
- *subtime = 0;
+ profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
+ if (!profile_data)
+ return 0;
+
+ profile_data->subtime = 0;
+ profile_data->sleeptime = current->ftrace_sleeptime;
return 1;
}
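profile_graph_entry() now reserves a two-field slot per traced frame via fgraph_reserve_data(), and the return handler gets the same slot back from fgraph_retrieve_data(). The real storage lives on the task's shadow ret_stack; the toy below models only the entry/return pairing, with a plain array standing in for that stack:

/* Toy model of the reserve/retrieve pairing used by the profiler. */
#include <stdio.h>

struct profile_fgraph_data {
	unsigned long long subtime;
	unsigned long long sleeptime;
};

static struct profile_fgraph_data stack[16];	/* one slot per depth */
static int depth;

static struct profile_fgraph_data *reserve_data(void)
{
	return &stack[depth++];
}

static struct profile_fgraph_data *retrieve_data(void)
{
	return &stack[--depth];
}

int main(void)
{
	struct profile_fgraph_data *d = reserve_data();	/* function entry */

	d->subtime = 0;
	d->sleeptime = 1234;	/* snapshot of current->ftrace_sleeptime */

	d = retrieve_data();	/* matching function return */
	printf("sleeptime snapshot = %llu\n", d->sleeptime);
	return 0;
}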
@@ -841,9 +849,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace,
static void profile_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
+ struct profile_fgraph_data *profile_data = NULL;
+ struct profile_fgraph_data *parent_data;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
- unsigned long long *subtime;
struct ftrace_profile *rec;
unsigned long flags;
int size;
@@ -859,16 +868,24 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
calltime = trace->rettime - trace->calltime;
+ if (!fgraph_sleep_time) {
+ profile_data = fgraph_retrieve_data(gops->idx, &size);
+ if (profile_data && current->ftrace_sleeptime)
+ calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
+ }
+
if (!fgraph_graph_time) {
/* Append this call time to the parent time to subtract */
- subtime = fgraph_retrieve_parent_data(gops->idx, &size, 1);
- if (subtime)
- *subtime += calltime;
+ parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
+ if (parent_data)
+ parent_data->subtime += calltime;
+
+ if (!profile_data)
+ profile_data = fgraph_retrieve_data(gops->idx, &size);
- subtime = fgraph_retrieve_data(gops->idx, &size);
- if (subtime && *subtime && *subtime < calltime)
- calltime -= *subtime;
+ if (profile_data && profile_data->subtime && profile_data->subtime < calltime)
+ calltime -= profile_data->subtime;
else
calltime = 0;
}
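With both corrections in place, the self time recorded for a frame is (rettime - calltime) minus time spent asleep minus time already attributed to children. A worked pass with made-up numbers (plain C):

/* Worked example of the two corrections in profile_graph_return(). */
#include <stdio.h>

int main(void)
{
	unsigned long long calltime = 1000;	/* trace->rettime - trace->calltime */

	/* sleep correction: the task slept 300ns inside this frame */
	unsigned long long sleep_now = 5300, sleep_at_entry = 5000;
	calltime -= sleep_now - sleep_at_entry;		/* 700 */

	/* graph-time correction: children already account for 450ns */
	unsigned long long subtime = 450;
	if (subtime && subtime < calltime)
		calltime -= subtime;			/* 250 */
	else
		calltime = 0;

	printf("self time = %llu ns\n", calltime);	/* 250 */
	return 0;
}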
@@ -1039,6 +1039,7 @@ static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftra
#endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth;
+extern bool fgraph_sleep_time;
static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
@@ -133,6 +133,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
unsigned long *task_var = fgraph_get_task_var(gops);
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
+ unsigned long long *sleeptime;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
@@ -167,6 +168,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
if (ftrace_graph_ignore_irqs())
return 0;
+ /* save the current sleep time if we are to ignore it */
+ if (!fgraph_sleep_time) {
+ sleeptime = fgraph_reserve_data(gops->idx, sizeof(*sleeptime));
+ if (sleeptime)
+ *sleeptime = current->ftrace_sleeptime;
+ }
+
/*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
@@ -238,6 +246,22 @@ void __trace_graph_return(struct trace_array *tr,
trace_buffer_unlock_commit_nostack(buffer, event);
}
+static void handle_nosleeptime(struct ftrace_graph_ret *trace,
+ struct fgraph_ops *gops)
+{
+ unsigned long long *sleeptime;
+ int size;
+
+ if (fgraph_sleep_time)
+ return;
+
+ sleeptime = fgraph_retrieve_data(gops->idx, &size);
+ if (!sleeptime)
+ return;
+
+ trace->calltime += current->ftrace_sleeptime - *sleeptime;
+}
+
void trace_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
@@ -256,6 +280,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
return;
}
+ handle_nosleeptime(trace, gops);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
@@ -278,6 +304,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
return;
}
+ handle_nosleeptime(trace, gops);
+
if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh))
return;
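handle_nosleeptime() folds the sleep delta into trace->calltime rather than into a computed duration, so every later rettime - calltime expression, including the tracing_thresh comparison just above, is already sleep-free. A small numeric walk-through (plain C, made-up values):

/* Pushing calltime forward makes every later duration check sleep-free. */
#include <stdio.h>

int main(void)
{
	unsigned long long calltime = 100, rettime = 1000;
	unsigned long long sleeptime_now = 800, sleeptime_snap = 500;

	calltime += sleeptime_now - sleeptime_snap;	/* 100 -> 400 */

	unsigned long long duration = rettime - calltime;	/* 600, awake only */
	unsigned long long tracing_thresh = 500;

	if (duration < tracing_thresh)
		printf("dropped: %llu ns awake < thresh\n", duration);
	else
		printf("kept: %llu ns awake\n", duration);	/* 600 >= 500 */
	return 0;
}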