Message ID: 20201030214014.801706340@goodmis.org (mailing list archive)
State: Not Applicable
On Fri 2020-10-30 17:31:53, Steven Rostedt wrote:
> From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
>
> This adds CONFIG_FTRACE_RECORD_RECURSION that will record to a file
> "recursed_functions" all the functions that caused recursion while a
> callback to the function tracer was running.
>
> --- /dev/null
> +++ b/kernel/trace/trace_recursion_record.c
> @@ -0,0 +1,220 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/seq_file.h>
> +#include <linux/kallsyms.h>
> +#include <linux/module.h>
> +#include <linux/ftrace.h>
> +#include <linux/fs.h>
> +
> +#include "trace_output.h"
> +
> +struct recursed_functions {
> +	unsigned long ip;
> +	unsigned long parent_ip;
> +};
> +
> +static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];

The code tries to be lockless safe as much as possible. It would make
sense to align the array.

> +static atomic_t nr_records;
> +
> +/*
> + * Cache the last found function. Yes, updates to this is racey, but
> + * so is memory cache ;-)
> + */
> +static unsigned long cached_function;
> +
> +void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
> +{
> +	int index;
> +	int i = 0;
> +	unsigned long old;
> +
> + again:
> +	/* First check the last one recorded */
> +	if (ip == cached_function)
> +		return;
> +
> +	index = atomic_read(&nr_records);
> +	/* nr_records is -1 when clearing records */
> +	smp_mb__after_atomic();
> +	if (index < 0)
> +		return;
> +
> +	/* See below */
> +	if (i > index)
> +		index = i;

This looks like a complicated way to do index++ via the "i" variable.
I guess that it was needed only in some older variant of the code.
See below.

> +	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
> +		return;
> +
> +	for (i = index - 1; i >= 0; i--) {
> +		if (recursed_functions[i].ip == ip) {
> +			cached_function = ip;
> +			return;
> +		}
> +	}
> +
> +	cached_function = ip;
> +
> +	/*
> +	 * We only want to add a function if it hasn't been added before.
> +	 * Add to the current location before incrementing the count.
> +	 * If it fails to add, then increment the index (save in i)
> +	 * and try again.
> +	 */
> +	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
> +	if (old != 0) {
> +		/* Did something else already added this for us? */
> +		if (old == ip)
> +			return;
> +		/* Try the next location (use i for the next index) */
> +		i = index + 1;

What about

	index++;

We basically want to run the code again with an index + 1 limit.

Maybe it even does not make sense to check the array again, and we
should just try to store the value into the next slot.

> +		goto again;
> +	}
> +
> +	recursed_functions[index].parent_ip = parent_ip;

WRITE_ONCE() ?

> +
> +	/*
> +	 * It's still possible that we could race with the clearing
> +	 *    CPU0                                    CPU1
> +	 *    ----                                    ----
> +	 *   ip = func
> +	 *                                         nr_records = -1;
> +	 *                                         recursed_functions[0] = 0;
> +	 *   i = -1
> +	 *   if (i < 0)
> +	 *                                         nr_records = 0;
> +	 *   (new recursion detected)
> +	 *   recursed_functions[0] = func
> +	 *                                         cmpxchg(recursed_functions[0],
> +	 *                                                 func, 0)
> +	 *
> +	 * But the worse that could happen is that we get a zero in
> +	 * the recursed_functions array, and it's likely that "func" will
> +	 * be recorded again.
> +	 */
> +	i = atomic_read(&nr_records);
> +	smp_mb__after_atomic();
> +	if (i < 0)
> +		cmpxchg(&recursed_functions[index].ip, ip, 0);
> +	else if (i <= index)
> +		atomic_cmpxchg(&nr_records, i, index + 1);

This looks weird. It would shift nr_records past the record added in
this call. It might skip many slots that were zeroed when clearing.
Also, we do not know whether our own entry was zeroed as well.

I would suggest doing it some other way (not even compile tested):

void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
	int index, old_index;
	int i = 0;
	unsigned long old_ip;

again:
	/* First check the last one recorded. */
	if (ip == READ_ONCE(cached_function))
		return;

	index = atomic_read(&nr_records);
	/* nr_records is -1 when clearing records. */
	smp_mb__after_atomic();
	if (index < 0)
		return;

	/* Already cached? */
	for (i = index - 1; i >= 0; i--) {
		if (recursed_functions[i].ip == ip) {
			WRITE_ONCE(cached_function, ip);
			return;
		}
	}

	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
		return;

	/*
	 * Try to reserve the slot. It might be already taken
	 * or the entire cache cleared.
	 */
	old_index = atomic_cmpxchg(&nr_records, index, index + 1);
	if (old_index != index)
		goto again;

	/*
	 * Be careful. The entire cache might have been cleared and reused in
	 * the meantime. Replace only an empty slot.
	 */
	old_ip = cmpxchg(&recursed_functions[index].ip, 0, ip);
	if (old_ip != 0)
		goto again;

	old_ip = cmpxchg(&recursed_functions[index].parent_ip, 0, parent_ip);
	if (old_ip != 0)
		goto again;

	/*
	 * No ip is better than a non-consistent one. The race with
	 * clearing should be rare and not worth a perfect solution.
	 */
	if (READ_ONCE(recursed_functions[index].ip) != ip) {
		cmpxchg(&recursed_functions[index].ip, ip, 0UL);
		goto again;
	}
}

The last check probably is not needed. Inconsistent entries should be
prevented by the way this function is called:

	static atomic_t paranoid_test;				\
	if (!atomic_read(&paranoid_test)) {			\
		atomic_inc(&paranoid_test);			\
		ftrace_record_recursion(ip, pip);		\
		atomic_dec(&paranoid_test);			\
	}							\

The rest of the patchset looks fine. I do not feel comfortable giving
it a Reviewed-by because I did not review it in depth. I spent more
time with the above lockless code. I took it as training. I need to
improve this skill to feel more comfortable with the lockless printk
ring buffer ;-)

Best Regards,
Petr
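As a quick aside on the alignment question raised above: a struct of two
unsigned longs is naturally word-aligned on common ABIs, so a static array
of them needs no explicit alignment attribute. A minimal user-space sketch
(plain C with illustrative names, not kernel code) that checks this:

	#include <stdio.h>

	struct recursed_functions {
		unsigned long ip;
		unsigned long parent_ip;
	};

	/* A static array mirroring the patch's layout. */
	static struct recursed_functions arr[4];

	int main(void)
	{
		/* Prints alignof=8 sizeof=16 on typical LP64 targets:
		 * the struct is naturally aligned already. */
		printf("alignof=%zu sizeof=%zu addr=%p\n",
		       _Alignof(struct recursed_functions),
		       sizeof(struct recursed_functions), (void *)arr);
		return 0;
	}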
On Mon, 2 Nov 2020 17:41:47 +0100
Petr Mladek <pmladek@suse.com> wrote:

> On Fri 2020-10-30 17:31:53, Steven Rostedt wrote:
> > From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
> >
> > This adds CONFIG_FTRACE_RECORD_RECURSION that will record to a file
> > "recursed_functions" all the functions that caused recursion while a
> > callback to the function tracer was running.
> >
> > --- /dev/null
> > +++ b/kernel/trace/trace_recursion_record.c
> > @@ -0,0 +1,220 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +#include <linux/seq_file.h>
> > +#include <linux/kallsyms.h>
> > +#include <linux/module.h>
> > +#include <linux/ftrace.h>
> > +#include <linux/fs.h>
> > +
> > +#include "trace_output.h"
> > +
> > +struct recursed_functions {
> > +	unsigned long ip;
> > +	unsigned long parent_ip;
> > +};
> > +
> > +static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
>
> The code tries to be lockless safe as much as possible. It would make
> sense to align the array.

Hmm, is there an arch where the compiler would put an array of
structures with two unsigned longs, misaligned?

> > +static atomic_t nr_records;
> > +
> > +/*
> > + * Cache the last found function. Yes, updates to this is racey, but
> > + * so is memory cache ;-)
> > + */
> > +static unsigned long cached_function;
> > +
> > +void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
> > +{
> > +	int index;
> > +	int i = 0;
> > +	unsigned long old;
> > +
> > + again:
> > +	/* First check the last one recorded */
> > +	if (ip == cached_function)
> > +		return;
> > +
> > +	index = atomic_read(&nr_records);
> > +	/* nr_records is -1 when clearing records */
> > +	smp_mb__after_atomic();
> > +	if (index < 0)
> > +		return;
> > +
> > +	/* See below */
> > +	if (i > index)
> > +		index = i;
>
> This looks like a complicated way to do index++ via the "i" variable.
> I guess that it was needed only in some older variant of the code.
> See below.

Because we reread the index above, and index could be bigger than i
(more than index + 1).

> > +	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
> > +		return;
> > +
> > +	for (i = index - 1; i >= 0; i--) {
> > +		if (recursed_functions[i].ip == ip) {
> > +			cached_function = ip;
> > +			return;
> > +		}
> > +	}
> > +
> > +	cached_function = ip;
> > +
> > +	/*
> > +	 * We only want to add a function if it hasn't been added before.
> > +	 * Add to the current location before incrementing the count.
> > +	 * If it fails to add, then increment the index (save in i)
> > +	 * and try again.
> > +	 */
> > +	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
> > +	if (old != 0) {
> > +		/* Did something else already added this for us? */
> > +		if (old == ip)
> > +			return;
> > +		/* Try the next location (use i for the next index) */
> > +		i = index + 1;
>
> What about
>
> 	index++;
>
> We basically want to run the code again with an index + 1 limit.

But something else could update nr_records, and we want to use that if
nr_records is greater than i.

Now, we could swap the use case, and have

	int index = 0;

	[..]
	i = atomic_read(&nr_records);
	if (i > index)
		index = i;

	[..]

	index++;
	goto again;

> Maybe it even does not make sense to check the array again, and we
> should just try to store the value into the next slot.

We do this dance to prevent duplicates.

But you are correct that this went through a few iterations. And the
first ones didn't have the cmpxchg on the ip itself, and that could
make it so that we don't need this index = i dance.

> > +		goto again;
> > +	}
> > +
> > +	recursed_functions[index].parent_ip = parent_ip;
>
> WRITE_ONCE() ?

Does it really matter?

> > +
> > +	/*
> > +	 * It's still possible that we could race with the clearing
> > +	 *    CPU0                                    CPU1
> > +	 *    ----                                    ----
> > +	 *   ip = func
> > +	 *                                         nr_records = -1;
> > +	 *                                         recursed_functions[0] = 0;
> > +	 *   i = -1
> > +	 *   if (i < 0)
> > +	 *                                         nr_records = 0;
> > +	 *   (new recursion detected)
> > +	 *   recursed_functions[0] = func
> > +	 *                                         cmpxchg(recursed_functions[0],
> > +	 *                                                 func, 0)
> > +	 *
> > +	 * But the worse that could happen is that we get a zero in
> > +	 * the recursed_functions array, and it's likely that "func" will
> > +	 * be recorded again.
> > +	 */
> > +	i = atomic_read(&nr_records);
> > +	smp_mb__after_atomic();
> > +	if (i < 0)
> > +		cmpxchg(&recursed_functions[index].ip, ip, 0);
> > +	else if (i <= index)
> > +		atomic_cmpxchg(&nr_records, i, index + 1);
>
> This looks weird. It would shift nr_records past the record added in
> this call. It might skip many slots that were zeroed when clearing.
> Also, we do not know whether our own entry was zeroed as well.
>
> I would suggest doing it some other way (not even compile tested):
>
> void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
> {
> 	int index, old_index;
> 	int i = 0;
> 	unsigned long old_ip;
>
> again:
> 	/* First check the last one recorded. */
> 	if (ip == READ_ONCE(cached_function))
> 		return;
>
> 	index = atomic_read(&nr_records);
> 	/* nr_records is -1 when clearing records. */
> 	smp_mb__after_atomic();
> 	if (index < 0)
> 		return;
>
> 	/* Already cached? */
> 	for (i = index - 1; i >= 0; i--) {
> 		if (recursed_functions[i].ip == ip) {
> 			WRITE_ONCE(cached_function, ip);
> 			return;
> 		}
> 	}
>
> 	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
> 		return;
>
> 	/*
> 	 * Try to reserve the slot. It might be already taken
> 	 * or the entire cache cleared.
> 	 */
> 	old_index = atomic_cmpxchg(&nr_records, index, index + 1);
> 	if (old_index != index)
> 		goto again;
>
> 	/*
> 	 * Be careful. The entire cache might have been cleared and reused in
> 	 * the meantime. Replace only an empty slot.
> 	 */
> 	old_ip = cmpxchg(&recursed_functions[index].ip, 0, ip);
> 	if (old_ip != 0)
> 		goto again;
>
> 	old_ip = cmpxchg(&recursed_functions[index].parent_ip, 0, parent_ip);
> 	if (old_ip != 0)
> 		goto again;
>
> 	/*
> 	 * No ip is better than a non-consistent one. The race with
> 	 * clearing should be rare and not worth a perfect solution.
> 	 */
> 	if (READ_ONCE(recursed_functions[index].ip) != ip) {
> 		cmpxchg(&recursed_functions[index].ip, ip, 0UL);
> 		goto again;
> 	}
> }

Let me go and rewrite it, this time considering the cmpxchg in the ip
update code. I may end up with what you have above ;-)

> The last check probably is not needed. Inconsistent entries should be
> prevented by the way this function is called:
>
> 	static atomic_t paranoid_test;				\
> 	if (!atomic_read(&paranoid_test)) {			\
> 		atomic_inc(&paranoid_test);			\
> 		ftrace_record_recursion(ip, pip);		\
> 		atomic_dec(&paranoid_test);			\
> 	}							\
>
> The rest of the patchset looks fine. I do not feel comfortable giving
> it a Reviewed-by because I did not review it in depth.
>
> I spent more time with the above lockless code. I took it as
> training. I need to improve this skill to feel more comfortable with
> the lockless printk ring buffer ;-)

Yeah, everything becomes exponentially complex when you make it
lockless with multiple concurrent writers.

-- Steve
On Mon, 2 Nov 2020 12:09:07 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> > > +void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
> > > +{
> > > +	int index;
> > > +	int i = 0;
> > > +	unsigned long old;
> > > +
> > > + again:
> > > +	/* First check the last one recorded */
> > > +	if (ip == cached_function)
> > > +		return;
> > > +
> > > +	index = atomic_read(&nr_records);
> > > +	/* nr_records is -1 when clearing records */
> > > +	smp_mb__after_atomic();
> > > +	if (index < 0)
> > > +		return;
> > > +
> > > +	/* See below */
> > > +	if (i > index)
> > > +		index = i;
> >
> > This looks like a complicated way to do index++ via the "i" variable.
> > I guess that it was needed only in some older variant of the code.
> > See below.
>
> Because we reread the index above, and index could be bigger than i
> (more than index + 1).
>
> > > +	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
> > > +		return;
> > > +
> > > +	for (i = index - 1; i >= 0; i--) {
> > > +		if (recursed_functions[i].ip == ip) {
> > > +			cached_function = ip;
> > > +			return;
> > > +		}
> > > +	}
> > > +
> > > +	cached_function = ip;
> > > +
> > > +	/*
> > > +	 * We only want to add a function if it hasn't been added before.
> > > +	 * Add to the current location before incrementing the count.
> > > +	 * If it fails to add, then increment the index (save in i)
> > > +	 * and try again.
> > > +	 */
> > > +	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
> > > +	if (old != 0) {
> > > +		/* Did something else already added this for us? */
> > > +		if (old == ip)
> > > +			return;
> > > +		/* Try the next location (use i for the next index) */
> > > +		i = index + 1;
> >
> > What about
> >
> > 	index++;
> >
> > We basically want to run the code again with an index + 1 limit.
>
> But something else could update nr_records, and we want to use that if
> nr_records is greater than i.
>
> Now, we could swap the use case, and have
>
> 	int index = 0;
>
> 	[..]
> 	i = atomic_read(&nr_records);
> 	if (i > index)
> 		index = i;
>
> 	[..]
>
> 	index++;
> 	goto again;
>
> > Maybe it even does not make sense to check the array again, and we
> > should just try to store the value into the next slot.
>
> We do this dance to prevent duplicates.
>
> But you are correct that this went through a few iterations. And the
> first ones didn't have the cmpxchg on the ip itself, and that could
> make it so that we don't need this index = i dance.

Playing with this more, I remember why I did this song and dance.

If we have two or more writers, and one beats the other in updating the
ip (with a different function), the one that lost will go and try
again. The reason to look one past nr_records is the race between the
multiple writers: the loser may loop around before the winner can
update nr_records, and it would fail to add its entry again.

You could just say, "hey, we'll just keep looping until the other
writer eventually updates nr_records". But this is where my paranoia
gets in. What happens if that other writer takes an interrupt
(interrupts are not disabled), and then deadlocks, or does something
bad? This CPU will not get locked up spinning. It is an unlikely
scenario, and it would require a bug someplace else. But I don't want a
bug report stating that this recursion-recording logic locked up the
CPU, hiding the real culprit.

I'll add a comment to explain this in the code. And also swap the i
and index around to make a little more sense.

-- Steve
On Mon, 2 Nov 2020 17:41:47 +0100
Petr Mladek <pmladek@suse.com> wrote:

> > +	i = atomic_read(&nr_records);
> > +	smp_mb__after_atomic();
> > +	if (i < 0)
> > +		cmpxchg(&recursed_functions[index].ip, ip, 0);
> > +	else if (i <= index)
> > +		atomic_cmpxchg(&nr_records, i, index + 1);
>
> This looks weird. It would shift nr_records past the record added in
> this call. It might skip many slots that were zeroed when clearing.
> Also, we do not know whether our own entry was zeroed as well.

nr_records always holds the next position to write to.

	index = nr_records;
	recursed_functions[index].ip = ip;
	nr_records++;

Before clearing, we have:

	nr_records = -1;
	smp_mb();
	memset(recursed_functions, 0);
	smp_wmb();
	nr_records = 0;

When we enter this function:

	i = nr_records;
	smp_mb();
	if (i < 0)
		return;

Thus, we just stopped all new updates while clearing the records. But
what about something that is in the middle of an update?

	i = nr_records;
	smp_mb();
	if (i < 0)
		cmpxchg(recursed_functions, ip, 0);

The above shows that if the updating process notices that the clearing
happened, it will clear the function it added.

	else if (i <= index)
		cmpxchg(nr_records, i, index + 1);

This makes sure that nr_records only grows if it is greater than or
equal to zero.

The only race that I see that can happen is the one in the comment I
showed. And that is: after enabling the recursed functions again after
clearing, one CPU could add a function while another CPU that just
added that same function is exiting this routine, notices that a
clearing of the array happened, and removes its function (which was the
same as the one just added). So we get a "zero" in the array. If this
happens, it is likely that the function will recurse again and will be
added later.

-- Steve
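The clear/record ordering described above can be exercised outside the
kernel. Below is a minimal user-space sketch, with GCC __atomic builtins
standing in for atomic_t, cmpxchg(), and the kernel barriers; the names
mirror the kernel code, but this is an illustration of the protocol, not
the in-tree implementation:

	#include <string.h>

	#define NR_SLOTS 128

	static long slots[NR_SLOTS];
	static int nr_records;			/* -1 while clearing */

	static void clear_records(void)
	{
		/* Stop all writers: they bail out when they read -1. */
		__atomic_store_n(&nr_records, -1, __ATOMIC_SEQ_CST);
		memset(slots, 0, sizeof(slots));
		/* Publish the zeroed array before re-enabling writers. */
		__atomic_thread_fence(__ATOMIC_RELEASE);
		__atomic_store_n(&nr_records, 0, __ATOMIC_RELAXED);
	}

	static void record(long ip)
	{
		int index = __atomic_load_n(&nr_records, __ATOMIC_SEQ_CST);
		long expected = 0;
		int cur;

		if (index < 0 || index >= NR_SLOTS)
			return;		/* clearing in progress, or full */

		/* Claim the slot only if it is still empty. */
		if (!__atomic_compare_exchange_n(&slots[index], &expected, ip, 0,
						 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return;		/* lost the race; the real code retries */

		/* Re-check: if a clear raced with us, undo our own store;
		 * otherwise bump the counter past our slot. */
		if (__atomic_load_n(&nr_records, __ATOMIC_SEQ_CST) < 0) {
			expected = ip;
			__atomic_compare_exchange_n(&slots[index], &expected, 0, 0,
						    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		} else {
			cur = index;
			__atomic_compare_exchange_n(&nr_records, &cur, index + 1, 0,
						    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		}
	}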
On Mon, 2 Nov 2020 12:37:21 -0500
Steven Rostedt <rostedt@goodmis.org> wrote:

> The only race that I see that can happen is the one in the comment I
> showed. And that is: after enabling the recursed functions again after
> clearing, one CPU could add a function while another CPU that just
> added that same function is exiting this routine, notices that a
> clearing of the array happened, and removes its function (which was the
> same as the one just added). So we get a "zero" in the array. If this
> happens, it is likely that the function will recurse again and will be
> added later.

Updated version of this function:

-- Steve

void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
	int index = 0;
	int i;
	unsigned long old;

 again:
	/* First check the last one recorded */
	if (ip == cached_function)
		return;

	i = atomic_read(&nr_records);
	/* nr_records is -1 when clearing records */
	smp_mb__after_atomic();
	if (i < 0)
		return;

	/*
	 * If there's two writers and this writer comes in second,
	 * the cmpxchg() below to update the ip will fail. Then this
	 * writer will try again. It is possible that index will now
	 * be greater than nr_records. This is because the writer
	 * that succeeded has not updated the nr_records yet.
	 * This writer could keep trying again until the other writer
	 * updates nr_records. But if the other writer takes an
	 * interrupt, and that interrupt locks up that CPU, we do
	 * not want this CPU to lock up due to the recursion protection,
	 * and have a bug report showing this CPU as the cause of
	 * locking up the computer. To not lose this record, this
	 * writer will simply use the next position to update the
	 * recursed_functions, and it will update the nr_records
	 * accordingly.
	 */
	if (index < i)
		index = i;
	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
		return;

	for (i = index - 1; i >= 0; i--) {
		if (recursed_functions[i].ip == ip) {
			cached_function = ip;
			return;
		}
	}

	cached_function = ip;

	/*
	 * We only want to add a function if it hasn't been added before.
	 * Add to the current location before incrementing the count.
	 * If it fails to add, then increment the index (save in i)
	 * and try again.
	 */
	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
	if (old != 0) {
		/* Did something else already added this for us? */
		if (old == ip)
			return;
		/* Try the next location (use i for the next index) */
		index++;
		goto again;
	}

	recursed_functions[index].parent_ip = parent_ip;

	/*
	 * It's still possible that we could race with the clearing
	 *    CPU0                                    CPU1
	 *    ----                                    ----
	 *   ip = func
	 *                                         nr_records = -1;
	 *                                         recursed_functions[0] = 0;
	 *   i = -1
	 *   if (i < 0)
	 *                                         nr_records = 0;
	 *   (new recursion detected)
	 *   recursed_functions[0] = func
	 *                                         cmpxchg(recursed_functions[0],
	 *                                                 func, 0)
	 *
	 * But the worse that could happen is that we get a zero in
	 * the recursed_functions array, and it's likely that "func" will
	 * be recorded again.
	 */
	i = atomic_read(&nr_records);
	smp_mb__after_atomic();
	if (i < 0)
		cmpxchg(&recursed_functions[index].ip, ip, 0);
	else if (i <= index)
		atomic_cmpxchg(&nr_records, i, index + 1);
}
On Mon 2020-11-02 12:09:07, Steven Rostedt wrote:
> On Mon, 2 Nov 2020 17:41:47 +0100
> Petr Mladek <pmladek@suse.com> wrote:
>
> > On Fri 2020-10-30 17:31:53, Steven Rostedt wrote:
> > > From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
> > >
> > > This adds CONFIG_FTRACE_RECORD_RECURSION that will record to a file
> > > "recursed_functions" all the functions that caused recursion while a
> > > callback to the function tracer was running.
> > >
> > > --- /dev/null
> > > +++ b/kernel/trace/trace_recursion_record.c
> > > +	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
> > > +		return;
> > > +
> > > +	for (i = index - 1; i >= 0; i--) {
> > > +		if (recursed_functions[i].ip == ip) {
> > > +			cached_function = ip;
> > > +			return;
> > > +		}
> > > +	}
> > > +
> > > +	cached_function = ip;
> > > +
> > > +	/*
> > > +	 * We only want to add a function if it hasn't been added before.
> > > +	 * Add to the current location before incrementing the count.
> > > +	 * If it fails to add, then increment the index (save in i)
> > > +	 * and try again.
> > > +	 */
> > > +	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
> > > +	if (old != 0) {
> > > +		/* Did something else already added this for us? */
> > > +		if (old == ip)
> > > +			return;
> > > +		/* Try the next location (use i for the next index) */
> > > +		i = index + 1;
> >
> > What about
> >
> > 	index++;
> >
> > We basically want to run the code again with an index + 1 limit.
>
> But something else could update nr_records, and we want to use that if
> nr_records is greater than i.
>
> Now, we could swap the use case, and have
>
> 	int index = 0;
>
> 	[..]
> 	i = atomic_read(&nr_records);
> 	if (i > index)
> 		index = i;
>
> 	[..]
>
> 	index++;
> 	goto again;
>
> > Maybe it even does not make sense to check the array again, and we
> > should just try to store the value into the next slot.
>
> We do this dance to prevent duplicates.

I see. My code was wrong. It reserved the slot for the new "ip" by a
cmpxchg on nr_records. The "ip" was stored later, so a parallel call
might not see that it is a duplicate.

Your code reserves the slot by a cmpxchg on "ip". Any parallel call
would fail to take the slot and would see the "ip" in the next
iteration.

Best Regards,
Petr
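The difference Petr describes is easy to see side by side. A compilable
user-space contrast of the two reservation orders, with GCC builtins
standing in for the kernel's cmpxchg()/atomic_cmpxchg(); the
single-threaded main() and all names are illustrative assumptions, not
the in-tree code:

	#include <stdio.h>

	#define NR_SLOTS 4

	static long slot_ip[NR_SLOTS];
	static int nr_records;

	/* (a) Reserve the counter first, publish the ip second. In the
	 * window between the two steps, a parallel caller scanning the
	 * array sees an empty slot and can record the same ip again. */
	static int reserve_counter_first(long ip)
	{
		int index = __atomic_load_n(&nr_records, __ATOMIC_SEQ_CST);
		int cur = index;

		if (!__atomic_compare_exchange_n(&nr_records, &cur, index + 1, 0,
						 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return -1;	/* lost the counter race: retry */
		/* duplicate window: slot_ip[index] is still 0 here */
		__atomic_store_n(&slot_ip[index], ip, __ATOMIC_SEQ_CST);
		return index;
	}

	/* (b) The order used above: claim the slot by its ip first, bump
	 * the counter second. A parallel caller either loses this cmpxchg
	 * and retries, or sees the ip when it rescans the array, so no
	 * duplicate lands. */
	static int reserve_slot_first(long ip)
	{
		int index = __atomic_load_n(&nr_records, __ATOMIC_SEQ_CST);
		long empty = 0;
		int cur = index;

		if (!__atomic_compare_exchange_n(&slot_ip[index], &empty, ip, 0,
						 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return -1;	/* slot taken: rescan and retry */
		__atomic_compare_exchange_n(&nr_records, &cur, index + 1, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return index;
	}

	int main(void)
	{
		int a = reserve_counter_first(0x1000);
		int b = reserve_slot_first(0x2000);

		printf("a: slot %d, b: slot %d\n", a, b);
		return 0;
	}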
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst
index 86cd14b8e126..5981d5691745 100644
--- a/Documentation/trace/ftrace-uses.rst
+++ b/Documentation/trace/ftrace-uses.rst
@@ -118,7 +118,7 @@ can help in this regard. If you start your code with:
 
 	int bit;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
 
@@ -130,7 +130,9 @@ The code in between will be safe to use, even if it ends up calling a
 function that the callback is tracing. Note, on success,
 ftrace_test_recursion_trylock() will disable preemption, and the
 ftrace_test_recursion_unlock() will enable it again (if it was previously
-enabled).
+enabled). The instruction pointer (ip) and its parent (parent_ip) is passed to
+ftrace_test_recursion_trylock() to record where the recursion happened
+(if CONFIG_FTRACE_RECORD_RECURSION is set).
 
 Alternatively, if the FTRACE_OPS_FL_RECURSION flag is set on the ftrace_ops
 (as explained below), then a helper trampoline will be used to test
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index 5eb2604fdf71..f30b179924ef 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -18,7 +18,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 4b1fdf15662c..8b0ed7c5a4ab 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -210,7 +210,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
 	int bit;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 5df8d50c65ae..fdfee39938ea 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -20,7 +20,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 	struct kprobe_ctlblk *kcb;
 	int bit;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(nip, parent_nip);
 	if (bit < 0)
 		return;
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 88466d7fb6b2..a1556333d481 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -204,7 +204,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
 	int bit;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index a40a6cdfcca3..954d930a7127 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -20,7 +20,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	struct kprobe_ctlblk *kcb;
 	int bit;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index 816210fc5d3a..adb0935eb062 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -41,7 +41,7 @@ static void notrace pstore_ftrace_call(unsigned long ip,
 	if (unlikely(oops_in_progress))
 		return;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index ac3d73484cb2..1cba5fe8777a 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -142,7 +142,28 @@ static __always_inline int trace_get_context_bit(void)
 		pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 }
 
-static __always_inline int trace_test_and_set_recursion(int start, int max)
+#ifdef CONFIG_FTRACE_RECORD_RECURSION
+extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
+/*
+ * The paranoid_test check can cause dropped reports (unlikely), but
+ * if the recursion is common, it will likely still be recorded later.
+ * But the paranoid_test is needed to make sure we don't crash.
+ */
+# define do_ftrace_record_recursion(ip, pip)				\
+	do {								\
+		static atomic_t paranoid_test;				\
+		if (!atomic_read(&paranoid_test)) {			\
+			atomic_inc(&paranoid_test);			\
+			ftrace_record_recursion(ip, pip);		\
+			atomic_dec(&paranoid_test);			\
+		}							\
+	} while (0)
+#else
+# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
+#endif
+
+static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
+							 int start, int max)
 {
 	unsigned int val = current->trace_recursion;
 	int bit;
@@ -158,8 +179,10 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
 		 * a switch between contexts. Allow for a single recursion.
 		 */
 		bit = TRACE_TRANSITION_BIT;
-		if (trace_recursion_test(bit))
+		if (trace_recursion_test(bit)) {
+			do_ftrace_record_recursion(ip, pip);
 			return -1;
+		}
 		trace_recursion_set(bit);
 		barrier();
 		return bit + 1;
@@ -199,9 +222,10 @@ static __always_inline void trace_clear_recursion(int bit)
  * Returns: -1 if a recursion happened.
  *           >= 0 if no recursion
  */
-static __always_inline int ftrace_test_recursion_trylock(void)
+static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
+							  unsigned long parent_ip)
 {
-	return trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+	return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 }
 
 /**
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 15480bf3ce88..875c5dbbdd33 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -49,7 +49,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	ops = container_of(fops, struct klp_ops, fops);
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (WARN_ON_ONCE(bit < 0))
 		return;
 	/*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a4020c0b4508..9b11c096d139 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -727,6 +727,31 @@ config TRACE_EVAL_MAP_FILE
 
 	If unsure, say N.
 
+config FTRACE_RECORD_RECURSION
+	bool "Record functions that recurse in function tracing"
+	depends on FUNCTION_TRACER
+	help
+	  All callbacks that attach to the function tracing have some sort
+	  of protection against recursion. Even though the protection exists,
+	  it adds overhead. This option will create a file in the tracefs
+	  file system called "recursed_functions" that will list the functions
+	  that triggered a recursion.
+
+	  This will add more overhead to cases that have recursion.
+
+	  If unsure, say N
+
+config FTRACE_RECORD_RECURSION_SIZE
+	int "Max number of recursed functions to record"
+	default 128
+	depends on FTRACE_RECORD_RECURSION
+	help
+	  This defines the limit of number of functions that can be
+	  listed in the "recursed_functions" file, that lists all
+	  the functions that caused a recursion to happen.
+	  This file can be reset, but the limit can not change in
+	  size at runtime.
+
 config GCOV_PROFILE_FTRACE
 	bool "Enable GCOV profiling on ftrace subsystem"
 	depends on GCOV_KERNEL
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e153be351548..7e44cea89fdc 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
 obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
+obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 39f2bba89b76..03aad2b5cd5e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6918,7 +6918,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	struct ftrace_ops *op;
 	int bit;
 
-	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
 
@@ -6993,7 +6993,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 {
 	int bit;
 
-	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
 	if (bit < 0)
 		return;
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index a2b9fddb8148..1b202e28dfaa 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -447,7 +447,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 	if ((unsigned long)ops->private != smp_processor_id())
 		return;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 89c414ce1388..646eda6c44a5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -141,7 +141,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (unlikely(!tr->function_enabled))
 		return;
 
-	bit = ftrace_test_recursion_trylock();
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 	if (bit < 0)
 		return;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 000e9dc224c6..92b1575ae0ca 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -353,8 +353,8 @@ static inline const char *kretprobed(const char *name)
 }
 #endif /* CONFIG_KRETPROBES */
 
-static void
-seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
+void
+trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
@@ -420,7 +420,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 		goto out;
 	}
 
-	seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
+	trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
 
 	if (sym_flags & TRACE_ITER_SYM_ADDR)
 		trace_seq_printf(s, " <" IP_FMT ">", ip);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 2f742b74e7e6..4c954636caf0 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -16,6 +16,7 @@ extern int
 seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 		 unsigned long sym_flags);
 
+extern void trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset);
 extern int trace_print_context(struct trace_iterator *iter);
 extern int trace_print_lat_context(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c
new file mode 100644
index 000000000000..0523071ca97c
--- /dev/null
+++ b/kernel/trace/trace_recursion_record.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+
+struct recursed_functions {
+	unsigned long ip;
+	unsigned long parent_ip;
+};
+
+static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
+static atomic_t nr_records;
+
+/*
+ * Cache the last found function. Yes, updates to this is racey, but
+ * so is memory cache ;-)
+ */
+static unsigned long cached_function;
+
+void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
+{
+	int index;
+	int i = 0;
+	unsigned long old;
+
+ again:
+	/* First check the last one recorded */
+	if (ip == cached_function)
+		return;
+
+	index = atomic_read(&nr_records);
+	/* nr_records is -1 when clearing records */
+	smp_mb__after_atomic();
+	if (index < 0)
+		return;
+
+	/* See below */
+	if (i > index)
+		index = i;
+	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
+		return;
+
+	for (i = index - 1; i >= 0; i--) {
+		if (recursed_functions[i].ip == ip) {
+			cached_function = ip;
+			return;
+		}
+	}
+
+	cached_function = ip;
+
+	/*
+	 * We only want to add a function if it hasn't been added before.
+	 * Add to the current location before incrementing the count.
+	 * If it fails to add, then increment the index (save in i)
+	 * and try again.
+	 */
+	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
+	if (old != 0) {
+		/* Did something else already added this for us? */
+		if (old == ip)
+			return;
+		/* Try the next location (use i for the next index) */
+		i = index + 1;
+		goto again;
+	}
+
+	recursed_functions[index].parent_ip = parent_ip;
+
+	/*
+	 * It's still possible that we could race with the clearing
+	 *    CPU0                                    CPU1
+	 *    ----                                    ----
+	 *   ip = func
+	 *                                         nr_records = -1;
+	 *                                         recursed_functions[0] = 0;
+	 *   i = -1
+	 *   if (i < 0)
+	 *                                         nr_records = 0;
+	 *   (new recursion detected)
+	 *   recursed_functions[0] = func
+	 *                                         cmpxchg(recursed_functions[0],
+	 *                                                 func, 0)
+	 *
+	 * But the worse that could happen is that we get a zero in
+	 * the recursed_functions array, and it's likely that "func" will
+	 * be recorded again.
+	 */
+	i = atomic_read(&nr_records);
+	smp_mb__after_atomic();
+	if (i < 0)
+		cmpxchg(&recursed_functions[index].ip, ip, 0);
+	else if (i <= index)
+		atomic_cmpxchg(&nr_records, i, index + 1);
+}
+
+static DEFINE_MUTEX(recursed_function_lock);
+static struct trace_seq *tseq;
+
+static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
+{
+	void *ret = NULL;
+	int index;
+
+	mutex_lock(&recursed_function_lock);
+	index = atomic_read(&nr_records);
+	if (*pos < index) {
+		ret = &recursed_functions[*pos];
+	}
+
+	tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
+	if (!tseq)
+		return ERR_PTR(-ENOMEM);
+
+	trace_seq_init(tseq);
+
+	return ret;
+}
+
+static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	int index;
+	int p;
+
+	index = atomic_read(&nr_records);
+	p = ++(*pos);
+
+	return p < index ? &recursed_functions[p] : NULL;
+}
+
+static void recursed_function_seq_stop(struct seq_file *m, void *v)
+{
+	kfree(tseq);
+	mutex_unlock(&recursed_function_lock);
+}
+
+static int recursed_function_seq_show(struct seq_file *m, void *v)
+{
+	struct recursed_functions *record = v;
+	int ret = 0;
+
+	if (record) {
+		trace_seq_print_sym(tseq, record->parent_ip, true);
+		trace_seq_puts(tseq, ":\t");
+		trace_seq_print_sym(tseq, record->ip, true);
+		trace_seq_putc(tseq, '\n');
+		ret = trace_print_seq(m, tseq);
+	}
+
+	return ret;
+}
+
+static const struct seq_operations recursed_function_seq_ops = {
+	.start  = recursed_function_seq_start,
+	.next   = recursed_function_seq_next,
+	.stop   = recursed_function_seq_stop,
+	.show   = recursed_function_seq_show
+};
+
+static int recursed_function_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	mutex_lock(&recursed_function_lock);
+	/* If this file was opened for write, then erase contents */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		/* disable updating records */
+		atomic_set(&nr_records, -1);
+		smp_mb__after_atomic();
+		memset(recursed_functions, 0, sizeof(recursed_functions));
+		smp_wmb();
+		/* enable them again */
+		atomic_set(&nr_records, 0);
+	}
+	if (file->f_mode & FMODE_READ)
+		ret = seq_open(file, &recursed_function_seq_ops);
+	mutex_unlock(&recursed_function_lock);
+
+	return ret;
+}
+
+static ssize_t recursed_function_write(struct file *file,
+				       const char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	return count;
+}
+
+static int recursed_function_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+	return 0;
+}
+
+static const struct file_operations recursed_functions_fops = {
+	.open           = recursed_function_open,
+	.write		= recursed_function_write,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = recursed_function_release,
+};
+
+__init static int create_recursed_functions(void)
+{
+	struct dentry *dentry;
+
+	dentry = trace_create_file("recursed_functions", 0644, NULL, NULL,
+				   &recursed_functions_fops);
+	if (!dentry)
+		pr_warn("WARNING: Failed to create recursed_functions\n");
+	return 0;
+}
+
+fs_initcall(create_recursed_functions);
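For reference, a small user-space sketch of exercising the interface the
patch creates: opening "recursed_functions" for write with O_TRUNC clears
the records (matching the FMODE_WRITE/O_TRUNC branch in
recursed_function_open() above), and reading it lists one
"parent:<tab>function" line per record. The path assumes tracefs is
mounted in its usual location:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/tracing/recursed_functions";
		char buf[4096];
		ssize_t n;
		int fd;

		/* Opening for write with O_TRUNC resets the record array. */
		fd = open(path, O_WRONLY | O_TRUNC);
		if (fd >= 0)
			close(fd);

		/* Reading lists the recorded parent/function symbol pairs. */
		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror(path);
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}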