@@ -1536,6 +1536,21 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
+config FUNCTION_METADATA
+ bool "Per-function metadata storage support"
+ default y
+ select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE if !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on !CFI_CLANG
+ help
+ Support per-function metadata storage for kernel functions, which
+ allows looking up a function's metadata from its address with
+ almost no overhead.
+
+ The index of the metadata is stored in the function padding, which
+ consumes 4 bytes. If FUNCTION_ALIGNMENT_8B is enabled, an extra 8
+ bytes of function padding is reserved at compile time; otherwise,
+ only an extra 4 bytes of padding is needed.
+
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
@@ -144,12 +144,31 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_FUNCTION_METADATA),y)
+ ifeq ($(CONFIG_FUNCTION_ALIGNMENT_8B),y)
+ __padding_nops := 2
+ else
+ __padding_nops := 1
+ endif
+else
+ __padding_nops := 0
+endif
+
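+# Resulting -fpatchable-function-entry=N,M values (N NOPs in total,
+# M of them before the function entry):
+#   CALL_OPS + METADATA:  6,4 (8B alignment) or 5,3 (4B alignment)
+#   CALL_OPS alone:       4,2
+#   WITH_ARGS + METADATA: 4,2 (8B alignment) or 3,1 (4B alignment)
+#   WITH_ARGS alone:      2,0
+#   METADATA alone:       2,2 (8B alignment) or 1,1 (4B alignment)
+#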
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+ __padding_nops := $(shell echo $(__padding_nops) + 2 | bc)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+else ifeq ($(CONFIG_FUNCTION_METADATA),y)
+ CC_FLAGS_FTRACE += -fpatchable-function-entry=$(__padding_nops),$(__padding_nops)
+ ifneq ($(CONFIG_FUNCTION_TRACER),y)
+ KBUILD_CFLAGS += $(CC_FLAGS_FTRACE)
+ # some files need to remove this cflag when CONFIG_FUNCTION_TRACER
+ # is not enabled, so export it here
+ export CC_FLAGS_FTRACE
+ endif
endif
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
@@ -24,6 +24,16 @@
#define FTRACE_PLT_IDX 0
#define NR_FTRACE_PLTS 1
+#ifdef CONFIG_FUNCTION_METADATA
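+/*
+ * The metadata index occupies one instruction slot in the pre-function
+ * padding. With DYNAMIC_FTRACE_WITH_CALL_OPS, the 8-byte callback
+ * pointer literal occupies the two slots right before the function
+ * entry, so the index lives one slot further out.
+ */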
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+#define KFUNC_MD_DATA_OFFSET (AARCH64_INSN_SIZE * 3)
+#else
+#define KFUNC_MD_DATA_OFFSET AARCH64_INSN_SIZE
+#endif
+#define KFUNC_MD_INSN_SIZE AARCH64_INSN_SIZE
+#define KFUNC_MD_INSN_OFFSET KFUNC_MD_DATA_OFFSET
+#endif
+
/*
* Currently, gcc tends to save the link register after the local variables
* on the stack. This causes the max stack tracer to report the function
@@ -216,6 +226,30 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
*/
return !strcmp(sym + 8, name);
}
+
+#ifdef CONFIG_FUNCTION_METADATA
+#include <asm/text-patching.h>
+
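+/* An index is installed iff the padding slot no longer holds a NOP. */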
+static inline bool kfunc_md_arch_exist(void *ip)
+{
+ return !aarch64_insn_is_nop(*(u32 *)(ip - KFUNC_MD_INSN_OFFSET));
+}
+
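+/* Encode the raw index value into the 4-byte instruction buffer. */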
+static inline void kfunc_md_arch_pretend(u8 *insn, u32 index)
+{
+ *(u32 *)insn = index;
+}
+
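+/* Reset the instruction buffer to a NOP encoding, clearing the index. */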
+static inline void kfunc_md_arch_nops(u8 *insn)
+{
+ *(u32 *)insn = aarch64_insn_gen_nop();
+}
+
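+/* Patch the prepared 4-byte value into the kernel text at ip. */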
+static inline int kfunc_md_arch_poke(void *ip, u8 *insn)
+{
+ return aarch64_insn_patch_text_nosync(ip, *(u32 *)insn);
+}
+#endif
#endif /* ifndef __ASSEMBLY__ */
#ifndef __ASSEMBLY__
@@ -88,8 +88,10 @@ unsigned long ftrace_call_adjust(unsigned long addr)
* to `BL <caller>`, which is at `addr + 4` bytes in either case.
*
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
- return addr + AARCH64_INSN_SIZE;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) {
+ addr += AARCH64_INSN_SIZE;
+ goto out;
+ }
/*
* When using patchable-function-entry with pre-function NOPs, addr is
@@ -139,6 +141,13 @@ unsigned long ftrace_call_adjust(unsigned long addr)
/* Skip the first NOP after function entry */
addr += AARCH64_INSN_SIZE;
+out:
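+ /*
+  * The compiler reserved extra pre-function NOP(s) for the metadata
+  * index (two with FUNCTION_ALIGNMENT_8B, one otherwise); skip them
+  * so that addr points at the ftrace patch site.
+  */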
+ if (IS_ENABLED(CONFIG_FUNCTION_METADATA)) {
+ if (IS_ENABLED(CONFIG_FUNCTION_ALIGNMENT_8B))
+ addr += 2 * AARCH64_INSN_SIZE;
+ else
+ addr += AARCH64_INSN_SIZE;
+ }
return addr;
}
The per-function metadata storage is already used by ftrace if
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS is enabled: since commit baaf553d3bc3
("arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS"), the pointer to
the callback is stored directly in the function padding, which consumes
8 bytes. So we can store the metadata index in the function padding in
the same way.

With CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS enabled, functions are 8-byte
aligned, and the kernel is compiled with an extra 8 bytes (2 NOPs) of
padding space. Otherwise, functions are 4-byte aligned, and only an
extra 4 bytes (1 NOP) is needed.

However, we have the same problem that Mark described in the commit
above: the function padding can't be used together with CFI_CLANG, as
it can make clang compute a wrong offset to the pre-function type hash.
He said that he was working with others on this problem two years ago.
Hi Mark, is there any progress on this?

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
 arch/arm64/Kconfig              | 15 +++++++++++++++
 arch/arm64/Makefile             | 23 ++++++++++++++++++++--
 arch/arm64/include/asm/ftrace.h | 34 +++++++++++++++++++++++++++++++++
 arch/arm64/kernel/ftrace.c      | 13 +++++++++++--
 4 files changed, 81 insertions(+), 4 deletions(-)
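For context, a minimal sketch of how a generic caller might drive the
arch hooks above to install an index. kfunc_md_set_index() is a
hypothetical name, and passing ip - KFUNC_MD_INSN_OFFSET to
kfunc_md_arch_poke() is an assumption about the generic code in this
series, not something defined by this patch:

	/* Hypothetical user of the arch hooks; not part of this patch. */
	static int kfunc_md_set_index(void *func_ip, u32 index)
	{
		u8 insn[KFUNC_MD_INSN_SIZE];

		/* Refuse to overwrite an index that is already installed. */
		if (kfunc_md_arch_exist(func_ip))
			return -EBUSY;

		/* Encode the index as a 4-byte "instruction"... */
		kfunc_md_arch_pretend(insn, index);

		/* ...and patch it into the padding slot before the entry
		 * (assumed to be at func_ip - KFUNC_MD_INSN_OFFSET).
		 */
		return kfunc_md_arch_poke(func_ip - KFUNC_MD_INSN_OFFSET, insn);
	}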