Message ID | 20241115194841.2108634-2-vadfed@meta.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | bpf: add cpu cycles kfuncs |
On 11/15/24 11:48 AM, Vadim Fedorenko wrote: > New kfunc to return ARCH-specific timecounter. For x86 BPF JIT converts > it into rdtsc ordered call. Other architectures will get JIT > implementation too if supported. The fallback is to > __arch_get_hw_counter(). > > Acked-by: Eduard Zingerman <eddyz87@gmail.com> > Signed-off-by: Vadim Fedorenko <vadfed@meta.com> LGTM with a small nit below. Acked-by: Yonghong Song <yonghong.song@linux.dev> > --- > v5 -> v6: > * add comment about dropping S64_MAX manipulation in jitted > implementation of rdtsc_oredered (Alexey) > * add comment about using 'lfence;rdtsc' variant (Alexey) > * change the check in fixup_kfunc_call() (Eduard) > * make __arch_get_hw_counter() call more aligned with vDSO > implementation (Yonghong) > v4 -> v5: > * use if instead of ifdef with IS_ENABLED > v3 -> v4: > * change name of the helper to bpf_get_cpu_cycles (Andrii) > * Hide the helper behind CONFIG_GENERIC_GETTIMEOFDAY to avoid exposing > it on architectures which do not have vDSO functions and data > * reduce the scope of check of inlined functions in verifier to only 2, > which are actually inlined. > v2 -> v3: > * change name of the helper to bpf_get_cpu_cycles_counter to explicitly > mention what counter it provides (Andrii) > * move kfunc definition to bpf.h to use it in JIT. > * introduce another kfunc to convert cycles into nanoseconds as more > meaningful time units for generic tracing use case (Andrii) > v1 -> v2: > * Fix incorrect function return value type to u64 > * Introduce bpf_jit_inlines_kfunc_call() and use it in > mark_fastcall_pattern_for_call() to avoid clobbering in case of > running programs with no JIT (Eduard) > * Avoid rewriting instruction and check function pointer directly > in JIT (Alexei) > * Change includes to fix compile issues on non x86 architectures > --- > arch/x86/net/bpf_jit_comp.c | 39 +++++++++++++++++++++++++++++++++ > arch/x86/net/bpf_jit_comp32.c | 14 ++++++++++++ > include/linux/bpf.h | 5 +++++ > include/linux/filter.h | 1 + > kernel/bpf/core.c | 11 ++++++++++ > kernel/bpf/helpers.c | 21 ++++++++++++++++++ > kernel/bpf/verifier.c | 41 ++++++++++++++++++++++++++++++----- > 7 files changed, 126 insertions(+), 6 deletions(-) > > diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c > index a43fc5af973d..107bd921f104 100644 > --- a/arch/x86/net/bpf_jit_comp.c > +++ b/arch/x86/net/bpf_jit_comp.c > @@ -2185,6 +2185,37 @@ st: if (is_imm8(insn->off)) > case BPF_JMP | BPF_CALL: { > u8 *ip = image + addrs[i - 1]; > > + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && > + imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { > + /* The default implementation of this kfunc uses > + * __arch_get_hw_counter() which is implemented as > + * `(u64)rdtsc_ordered() & S64_MAX`. We skip masking > + * part because we assume it's not needed in BPF > + * use case (two measurements close in time). > + * Original code for rdtsc_ordered() uses sequence: > + * 'rdtsc; nop; nop; nop' to patch it into > + * 'lfence; rdtsc' or 'rdtscp' depending on CPU features. > + * JIT uses 'lfence; rdtsc' variant because BPF program > + * doesn't care about cookie provided by rdtsp in RCX. rdtsp -> tdtscp? 
> + * Save RDX because RDTSC will use EDX:EAX to return u64 > + */ > + emit_mov_reg(&prog, true, AUX_REG, BPF_REG_3); > + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) > + EMIT_LFENCE(); > + EMIT2(0x0F, 0x31); > + > + /* shl RDX, 32 */ > + maybe_emit_1mod(&prog, BPF_REG_3, true); > + EMIT3(0xC1, add_1reg(0xE0, BPF_REG_3), 32); > + /* or RAX, RDX */ > + maybe_emit_mod(&prog, BPF_REG_0, BPF_REG_3, true); > + EMIT2(0x09, add_2reg(0xC0, BPF_REG_0, BPF_REG_3)); > + /* restore RDX from R11 */ > + emit_mov_reg(&prog, true, BPF_REG_3, AUX_REG); > + > + break; > + } > + > func = (u8 *) __bpf_call_base + imm32; > if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { > LOAD_TAIL_CALL_CNT_PTR(stack_depth); > @@ -3791,3 +3822,11 @@ u64 bpf_arch_uaddress_limit(void) > { > return 0; > } > + > +/* x86-64 JIT can inline kfunc */ > +bool bpf_jit_inlines_kfunc_call(s32 imm) > +{ > + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) > + return true; > + return false; > +} > diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c > index de0f9e5f9f73..e6097a371b69 100644 > --- a/arch/x86/net/bpf_jit_comp32.c > +++ b/arch/x86/net/bpf_jit_comp32.c > @@ -2094,6 +2094,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, > if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { > int err; > > + if (imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { > + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) > + EMIT3(0x0F, 0xAE, 0xE8); > + EMIT2(0x0F, 0x31); > + break; > + } > + > err = emit_kfunc_call(bpf_prog, > image + addrs[i], > insn, &prog); > @@ -2621,3 +2628,10 @@ bool bpf_jit_supports_kfunc_call(void) > { > return true; > } > + > +bool bpf_jit_inlines_kfunc_call(s32 imm) > +{ > + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) > + return true; > + return false; > +} > diff --git a/include/linux/bpf.h b/include/linux/bpf.h > index 3ace0d6227e3..43a5207a1591 100644 > --- a/include/linux/bpf.h > +++ b/include/linux/bpf.h > @@ -3333,6 +3333,11 @@ void bpf_user_rnd_init_once(void); > u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); > u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); > > +/* Inlined kfuncs */ > +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) > +u64 bpf_get_cpu_cycles(void); > +#endif > + > #if defined(CONFIG_NET) > bool bpf_sock_common_is_valid_access(int off, int size, > enum bpf_access_type type, > diff --git a/include/linux/filter.h b/include/linux/filter.h > index 3a21947f2fd4..9cf57233874f 100644 > --- a/include/linux/filter.h > +++ b/include/linux/filter.h > @@ -1111,6 +1111,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); > void bpf_jit_compile(struct bpf_prog *prog); > bool bpf_jit_needs_zext(void); > bool bpf_jit_inlines_helper_call(s32 imm); > +bool bpf_jit_inlines_kfunc_call(s32 imm); > bool bpf_jit_supports_subprog_tailcalls(void); > bool bpf_jit_supports_percpu_insn(void); > bool bpf_jit_supports_kfunc_call(void); > diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c > index 14d9288441f2..daa3ab458c8a 100644 > --- a/kernel/bpf/core.c > +++ b/kernel/bpf/core.c > @@ -2965,6 +2965,17 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm) > return false; > } > > +/* Return true if the JIT inlines the call to the kfunc corresponding to > + * the imm. > + * > + * The verifier will not patch the insn->imm for the call to the helper if > + * this returns true. > + */ > +bool __weak bpf_jit_inlines_kfunc_call(s32 imm) > +{ > + return false; > +} > + > /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. 
*/ > bool __weak bpf_jit_supports_subprog_tailcalls(void) > { > diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c > index 751c150f9e1c..12d40537e57b 100644 > --- a/kernel/bpf/helpers.c > +++ b/kernel/bpf/helpers.c > @@ -23,6 +23,10 @@ > #include <linux/btf_ids.h> > #include <linux/bpf_mem_alloc.h> > #include <linux/kasan.h> > +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) > +#include <vdso/datapage.h> > +#include <asm/vdso/vsyscall.h> > +#endif > > #include "../../lib/kstrtox.h" > > @@ -3057,6 +3061,20 @@ __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user > return ret + 1; > } > > +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) > +__bpf_kfunc u64 bpf_get_cpu_cycles(void) > +{ > + const struct vdso_data *vd = __arch_get_k_vdso_data(); > + > + vd = &vd[CS_RAW]; > + > + /* CS_RAW clock_mode translates to VDSO_CLOCKMODE_TSC on x86 and > + * to VDSO_CLOCKMODE_ARCHTIMER on aarch64/risc-v. > + */ > + return __arch_get_hw_counter(vd->clock_mode, vd); > +} > +#endif > + > __bpf_kfunc_end_defs(); > > BTF_KFUNCS_START(generic_btf_ids) > @@ -3149,6 +3167,9 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache) > BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) > BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) > BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) > +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) > +BTF_ID_FLAGS(func, bpf_get_cpu_cycles, KF_FASTCALL) > +#endif > BTF_KFUNCS_END(common_btf_ids) > > static const struct btf_kfunc_id_set common_kfunc_set = { [...]
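For reference, a minimal sketch of how a BPF program could use the new kfunc to measure cycles spent in a kernel function. The prototype (u64 bpf_get_cpu_cycles(void)) comes from the patch; the attach point (do_nanosleep), the variable names and the extern __ksym declaration style are illustrative assumptions, not code from this series.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: not part of this series' selftests. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfunc added by this patch; resolved via BTF at load time */
extern u64 bpf_get_cpu_cycles(void) __ksym;

u64 start;	/* cycles at function entry */
u64 delta;	/* cycles spent in the traced function */

SEC("fentry/do_nanosleep")
int BPF_PROG(cycles_entry)
{
	start = bpf_get_cpu_cycles();
	return 0;
}

SEC("fexit/do_nanosleep")
int BPF_PROG(cycles_exit)
{
	delta = bpf_get_cpu_cycles() - start;
	return 0;
}

char _license[] SEC("license") = "GPL";

With a JITed program on x86-64, each call collapses into the inlined 'lfence; rdtsc' sequence shown in the patch; otherwise the call falls back to the __arch_get_hw_counter()-based kernel implementation.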
Hi Vadim,

kernel test robot noticed the following build errors:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-add-bpf_get_cpu_cycles-kfunc/20241117-002106
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20241115194841.2108634-2-vadfed%40meta.com
patch subject: [PATCH bpf-next v6 1/4] bpf: add bpf_get_cpu_cycles kfunc
config: x86_64-kexec (https://download.01.org/0day-ci/archive/20241117/202411171347.9Yb9hhnX-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241117/202411171347.9Yb9hhnX-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202411171347.9Yb9hhnX-lkp@intel.com/

All errors (new ones prefixed by >>):

>> ld.lld: error: undefined hidden symbol: pvclock_page
>>> referenced by pvclock.h:25 (arch/x86/include/asm/pvclock.h:25)
>>> vmlinux.o:(vread_pvclock)
>>> referenced by gettimeofday.h:228 (arch/x86/include/asm/vdso/gettimeofday.h:228)
>>> vmlinux.o:(vread_pvclock)
>>> referenced by pvclock.h:86 (arch/x86/include/asm/pvclock.h:86)
>>> vmlinux.o:(vread_pvclock)
>>> referenced 4 more times
--
>> ld.lld: error: undefined hidden symbol: hvclock_page
>>> referenced by hyperv_timer.h:65 (include/clocksource/hyperv_timer.h:65)
>>> vmlinux.o:(vread_hvclock)
>>> referenced by hyperv_timer.h:74 (include/clocksource/hyperv_timer.h:74)
>>> vmlinux.o:(vread_hvclock)
>>> referenced by hyperv_timer.h:75 (include/clocksource/hyperv_timer.h:75)
>>> vmlinux.o:(vread_hvclock)
>>> referenced 1 more times
On 15/11/2024 21:54, Yonghong Song wrote: > > > On 11/15/24 11:48 AM, Vadim Fedorenko wrote: >> New kfunc to return ARCH-specific timecounter. For x86 BPF JIT converts >> it into rdtsc ordered call. Other architectures will get JIT >> implementation too if supported. The fallback is to >> __arch_get_hw_counter(). >> >> Acked-by: Eduard Zingerman <eddyz87@gmail.com> >> Signed-off-by: Vadim Fedorenko <vadfed@meta.com> > > LGTM with a small nit below. > > Acked-by: Yonghong Song <yonghong.song@linux.dev> @Yonghong The changes to align to vdso call bring this patch to the state when the kernel fails to compile with CONFIG_PARAVIRT_CLOCK or CONFIG_HYPERV_TIMER enabled. This happens because on x86 there is special way to grab cpu cycle counter in PARAVIRT mode. The paravirt memory structure is hidden for kernel and linked for vDSO only using arch/x86/entry/vdso/vdso-layout.lds.S. But in anycase both vread_pvclock() and vread_hvclock() end up doing rdtsc_ordered(). I believe we can have constant clock_mode for x86 equal to VDSO_CLOCKMODE_TSC given we have JIT for x86 ready. Another way is to switch to use get_cycles() which is also defined for all architectures. But that will bring up another discussion whether we should use rdtsc_ordered in JIT, because on x86 get_cycles() ends up calling rdtsc() which has no LFENCE in assembly. If I remember correctly there was a question of maybe using simple rdtsc() in this patchset as ordered version might be slow on modern CPUs. We still can use shift and mult values for cycles2ns helper because we know that CS_RAW uses the same cpu cycles counter. I'm up for any option, but let's just agree on how to proceed. Thanks. > >> --- >> v5 -> v6: >> * add comment about dropping S64_MAX manipulation in jitted >> implementation of rdtsc_oredered (Alexey) >> * add comment about using 'lfence;rdtsc' variant (Alexey) >> * change the check in fixup_kfunc_call() (Eduard) >> * make __arch_get_hw_counter() call more aligned with vDSO >> implementation (Yonghong) >> v4 -> v5: >> * use if instead of ifdef with IS_ENABLED >> v3 -> v4: >> * change name of the helper to bpf_get_cpu_cycles (Andrii) >> * Hide the helper behind CONFIG_GENERIC_GETTIMEOFDAY to avoid exposing >> it on architectures which do not have vDSO functions and data >> * reduce the scope of check of inlined functions in verifier to only 2, >> which are actually inlined. >> v2 -> v3: >> * change name of the helper to bpf_get_cpu_cycles_counter to explicitly >> mention what counter it provides (Andrii) >> * move kfunc definition to bpf.h to use it in JIT. 
>> * introduce another kfunc to convert cycles into nanoseconds as more >> meaningful time units for generic tracing use case (Andrii) >> v1 -> v2: >> * Fix incorrect function return value type to u64 >> * Introduce bpf_jit_inlines_kfunc_call() and use it in >> mark_fastcall_pattern_for_call() to avoid clobbering in case of >> running programs with no JIT (Eduard) >> * Avoid rewriting instruction and check function pointer directly >> in JIT (Alexei) >> * Change includes to fix compile issues on non x86 architectures >> --- >> arch/x86/net/bpf_jit_comp.c | 39 +++++++++++++++++++++++++++++++++ >> arch/x86/net/bpf_jit_comp32.c | 14 ++++++++++++ >> include/linux/bpf.h | 5 +++++ >> include/linux/filter.h | 1 + >> kernel/bpf/core.c | 11 ++++++++++ >> kernel/bpf/helpers.c | 21 ++++++++++++++++++ >> kernel/bpf/verifier.c | 41 ++++++++++++++++++++++++++++++----- >> 7 files changed, 126 insertions(+), 6 deletions(-) >> >> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c >> index a43fc5af973d..107bd921f104 100644 >> --- a/arch/x86/net/bpf_jit_comp.c >> +++ b/arch/x86/net/bpf_jit_comp.c >> @@ -2185,6 +2185,37 @@ st: if (is_imm8(insn->off)) >> case BPF_JMP | BPF_CALL: { >> u8 *ip = image + addrs[i - 1]; >> + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && >> + imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { >> + /* The default implementation of this kfunc uses >> + * __arch_get_hw_counter() which is implemented as >> + * `(u64)rdtsc_ordered() & S64_MAX`. We skip masking >> + * part because we assume it's not needed in BPF >> + * use case (two measurements close in time). >> + * Original code for rdtsc_ordered() uses sequence: >> + * 'rdtsc; nop; nop; nop' to patch it into >> + * 'lfence; rdtsc' or 'rdtscp' depending on CPU >> features. >> + * JIT uses 'lfence; rdtsc' variant because BPF program >> + * doesn't care about cookie provided by rdtsp in RCX. > > rdtsp -> tdtscp? 
> >> + * Save RDX because RDTSC will use EDX:EAX to return u64 >> + */ >> + emit_mov_reg(&prog, true, AUX_REG, BPF_REG_3); >> + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) >> + EMIT_LFENCE(); >> + EMIT2(0x0F, 0x31); >> + >> + /* shl RDX, 32 */ >> + maybe_emit_1mod(&prog, BPF_REG_3, true); >> + EMIT3(0xC1, add_1reg(0xE0, BPF_REG_3), 32); >> + /* or RAX, RDX */ >> + maybe_emit_mod(&prog, BPF_REG_0, BPF_REG_3, true); >> + EMIT2(0x09, add_2reg(0xC0, BPF_REG_0, BPF_REG_3)); >> + /* restore RDX from R11 */ >> + emit_mov_reg(&prog, true, BPF_REG_3, AUX_REG); >> + >> + break; >> + } >> + >> func = (u8 *) __bpf_call_base + imm32; >> if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { >> LOAD_TAIL_CALL_CNT_PTR(stack_depth); >> @@ -3791,3 +3822,11 @@ u64 bpf_arch_uaddress_limit(void) >> { >> return 0; >> } >> + >> +/* x86-64 JIT can inline kfunc */ >> +bool bpf_jit_inlines_kfunc_call(s32 imm) >> +{ >> + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) >> + return true; >> + return false; >> +} >> diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/ >> bpf_jit_comp32.c >> index de0f9e5f9f73..e6097a371b69 100644 >> --- a/arch/x86/net/bpf_jit_comp32.c >> +++ b/arch/x86/net/bpf_jit_comp32.c >> @@ -2094,6 +2094,13 @@ static int do_jit(struct bpf_prog *bpf_prog, >> int *addrs, u8 *image, >> if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { >> int err; >> + if (imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { >> + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) >> + EMIT3(0x0F, 0xAE, 0xE8); >> + EMIT2(0x0F, 0x31); >> + break; >> + } >> + >> err = emit_kfunc_call(bpf_prog, >> image + addrs[i], >> insn, &prog); >> @@ -2621,3 +2628,10 @@ bool bpf_jit_supports_kfunc_call(void) >> { >> return true; >> } >> + >> +bool bpf_jit_inlines_kfunc_call(s32 imm) >> +{ >> + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) >> + return true; >> + return false; >> +} >> diff --git a/include/linux/bpf.h b/include/linux/bpf.h >> index 3ace0d6227e3..43a5207a1591 100644 >> --- a/include/linux/bpf.h >> +++ b/include/linux/bpf.h >> @@ -3333,6 +3333,11 @@ void bpf_user_rnd_init_once(void); >> u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); >> u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); >> +/* Inlined kfuncs */ >> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >> +u64 bpf_get_cpu_cycles(void); >> +#endif >> + >> #if defined(CONFIG_NET) >> bool bpf_sock_common_is_valid_access(int off, int size, >> enum bpf_access_type type, >> diff --git a/include/linux/filter.h b/include/linux/filter.h >> index 3a21947f2fd4..9cf57233874f 100644 >> --- a/include/linux/filter.h >> +++ b/include/linux/filter.h >> @@ -1111,6 +1111,7 @@ struct bpf_prog *bpf_int_jit_compile(struct >> bpf_prog *prog); >> void bpf_jit_compile(struct bpf_prog *prog); >> bool bpf_jit_needs_zext(void); >> bool bpf_jit_inlines_helper_call(s32 imm); >> +bool bpf_jit_inlines_kfunc_call(s32 imm); >> bool bpf_jit_supports_subprog_tailcalls(void); >> bool bpf_jit_supports_percpu_insn(void); >> bool bpf_jit_supports_kfunc_call(void); >> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c >> index 14d9288441f2..daa3ab458c8a 100644 >> --- a/kernel/bpf/core.c >> +++ b/kernel/bpf/core.c >> @@ -2965,6 +2965,17 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm) >> return false; >> } >> +/* Return true if the JIT inlines the call to the kfunc corresponding to >> + * the imm. >> + * >> + * The verifier will not patch the insn->imm for the call to the >> helper if >> + * this returns true. 
>> + */ >> +bool __weak bpf_jit_inlines_kfunc_call(s32 imm) >> +{ >> + return false; >> +} >> + >> /* Return TRUE if the JIT backend supports mixing bpf2bpf and >> tailcalls. */ >> bool __weak bpf_jit_supports_subprog_tailcalls(void) >> { >> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c >> index 751c150f9e1c..12d40537e57b 100644 >> --- a/kernel/bpf/helpers.c >> +++ b/kernel/bpf/helpers.c >> @@ -23,6 +23,10 @@ >> #include <linux/btf_ids.h> >> #include <linux/bpf_mem_alloc.h> >> #include <linux/kasan.h> >> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >> +#include <vdso/datapage.h> >> +#include <asm/vdso/vsyscall.h> >> +#endif >> #include "../../lib/kstrtox.h" >> @@ -3057,6 +3061,20 @@ __bpf_kfunc int bpf_copy_from_user_str(void >> *dst, u32 dst__sz, const void __user >> return ret + 1; >> } >> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >> +__bpf_kfunc u64 bpf_get_cpu_cycles(void) >> +{ >> + const struct vdso_data *vd = __arch_get_k_vdso_data(); >> + >> + vd = &vd[CS_RAW]; >> + >> + /* CS_RAW clock_mode translates to VDSO_CLOCKMODE_TSC on x86 and >> + * to VDSO_CLOCKMODE_ARCHTIMER on aarch64/risc-v. >> + */ >> + return __arch_get_hw_counter(vd->clock_mode, vd); >> +} >> +#endif >> + >> __bpf_kfunc_end_defs(); >> BTF_KFUNCS_START(generic_btf_ids) >> @@ -3149,6 +3167,9 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache) >> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) >> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | >> KF_RET_NULL | KF_SLEEPABLE) >> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | >> KF_SLEEPABLE) >> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >> +BTF_ID_FLAGS(func, bpf_get_cpu_cycles, KF_FASTCALL) >> +#endif >> BTF_KFUNCS_END(common_btf_ids) >> static const struct btf_kfunc_id_set common_kfunc_set = { > [...]
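For comparison only, a sketch of the get_cycles()-based alternative mentioned above. This is not what the patch does; as noted, on x86 it would map to a plain rdtsc() without the LFENCE ordering that rdtsc_ordered() and the x86 JIT sequence provide.

/* Hypothetical fallback using get_cycles(), which every architecture defines
 * (asm-generic/timex.h stubs it out to return 0 where there is no counter).
 * On x86 this reads the TSC via rdtsc() without an LFENCE.
 */
#include <linux/timex.h>

__bpf_kfunc u64 bpf_get_cpu_cycles(void)
{
	return (u64)get_cycles();
}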
On 11/17/24 10:11 AM, Vadim Fedorenko wrote: > On 15/11/2024 21:54, Yonghong Song wrote: >> >> >> On 11/15/24 11:48 AM, Vadim Fedorenko wrote: >>> New kfunc to return ARCH-specific timecounter. For x86 BPF JIT converts >>> it into rdtsc ordered call. Other architectures will get JIT >>> implementation too if supported. The fallback is to >>> __arch_get_hw_counter(). >>> >>> Acked-by: Eduard Zingerman <eddyz87@gmail.com> >>> Signed-off-by: Vadim Fedorenko <vadfed@meta.com> >> >> LGTM with a small nit below. >> >> Acked-by: Yonghong Song <yonghong.song@linux.dev> > > @Yonghong The changes to align to vdso call bring this patch to the > state when the kernel fails to compile with CONFIG_PARAVIRT_CLOCK or > CONFIG_HYPERV_TIMER enabled. This happens because on x86 there is > special way to grab cpu cycle counter in PARAVIRT mode. The paravirt > memory structure is hidden for kernel and linked for vDSO only using > arch/x86/entry/vdso/vdso-layout.lds.S. But in anycase both > vread_pvclock() and vread_hvclock() end up doing rdtsc_ordered(). > I believe we can have constant clock_mode for x86 equal to > VDSO_CLOCKMODE_TSC given we have JIT for x86 ready. What you described is correct. For x86, we have static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) { if (likely(clock_mode == VDSO_CLOCKMODE_TSC)) return (u64)rdtsc_ordered() & S64_MAX; /* * For any memory-mapped vclock type, we need to make sure that gcc * doesn't cleverly hoist a load before the mode check. Otherwise we * might end up touching the memory-mapped page even if the vclock in * question isn't enabled, which will segfault. Hence the barriers. */ #ifdef CONFIG_PARAVIRT_CLOCK if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) { barrier(); return vread_pvclock(); } #endif #ifdef CONFIG_HYPERV_TIMER if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) { barrier(); return vread_hvclock(); } #endif return U64_MAX; } Even if CONFIG_PARAVIRT_CLOCK and CONFIG_HYPERV_TIMER are enabled, if clock_mode is constant 1 (== VDSO_CLOCKMODE_TSC), dead code elimination will happen to remove vread_pvclock() and vread_hvclock() so we are fine. And actually the above PARAVIRT_CLOCK and HYPERV_TIMER are x86 specific. Other arch's do not have such things. s390 has the following implementation: static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) { u64 adj, now; now = get_tod_clock(); adj = vd->arch_data.tod_steering_end - now; if (unlikely((s64) adj > 0)) now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); return now; } So I think __arch_get_hw_counter(1, vd) probably works. But you need to double check that. This makes us to do __arch_get_hw_counter(const_clock_mode, vd). As you mentioned earlier, const_clock_mode = 1 is working for all architectures. But please add a comment for that. > > Another way is to switch to use get_cycles() which is also defined for > all architectures. But that will bring up another discussion whether we > should use rdtsc_ordered in JIT, because on x86 get_cycles() ends up > calling rdtsc() which has no LFENCE in assembly. If I remember correctly > there was a question of maybe using simple rdtsc() in this patchset as > ordered version might be slow on modern CPUs. We still can use shift > and mult values for cycles2ns helper because we know that CS_RAW uses > the same cpu cycles counter. I think some previous discussion favors rdtsc_ordered to ensure counting results are precise. So let us go with rdtsc_ordered. 
One more thing, maybe we can replace asm/vdso/vsyscall.h to vdso/vsyscall.h (from kernel/time/vsyscall.c)? diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e89eff53c340..fb93f003ecf8 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -25,7 +25,7 @@ #include <linux/kasan.h> #if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) #include <vdso/datapage.h> -#include <asm/vdso/vsyscall.h> +#include <vdso/vsyscall.h> #endif #include "../../lib/kstrtox.h" > > I'm up for any option, but let's just agree on how to proceed. > > Thanks. >> >>> --- >>> v5 -> v6: >>> * add comment about dropping S64_MAX manipulation in jitted >>> implementation of rdtsc_oredered (Alexey) >>> * add comment about using 'lfence;rdtsc' variant (Alexey) >>> * change the check in fixup_kfunc_call() (Eduard) >>> * make __arch_get_hw_counter() call more aligned with vDSO >>> implementation (Yonghong) >>> v4 -> v5: >>> * use if instead of ifdef with IS_ENABLED >>> v3 -> v4: >>> * change name of the helper to bpf_get_cpu_cycles (Andrii) >>> * Hide the helper behind CONFIG_GENERIC_GETTIMEOFDAY to avoid exposing >>> it on architectures which do not have vDSO functions and data >>> * reduce the scope of check of inlined functions in verifier to only 2, >>> which are actually inlined. >>> v2 -> v3: >>> * change name of the helper to bpf_get_cpu_cycles_counter to explicitly >>> mention what counter it provides (Andrii) >>> * move kfunc definition to bpf.h to use it in JIT. >>> * introduce another kfunc to convert cycles into nanoseconds as more >>> meaningful time units for generic tracing use case (Andrii) >>> v1 -> v2: >>> * Fix incorrect function return value type to u64 >>> * Introduce bpf_jit_inlines_kfunc_call() and use it in >>> mark_fastcall_pattern_for_call() to avoid clobbering in case of >>> running programs with no JIT (Eduard) >>> * Avoid rewriting instruction and check function pointer directly >>> in JIT (Alexei) >>> * Change includes to fix compile issues on non x86 architectures >>> --- >>> arch/x86/net/bpf_jit_comp.c | 39 +++++++++++++++++++++++++++++++++ >>> arch/x86/net/bpf_jit_comp32.c | 14 ++++++++++++ >>> include/linux/bpf.h | 5 +++++ >>> include/linux/filter.h | 1 + >>> kernel/bpf/core.c | 11 ++++++++++ >>> kernel/bpf/helpers.c | 21 ++++++++++++++++++ >>> kernel/bpf/verifier.c | 41 >>> ++++++++++++++++++++++++++++++----- >>> 7 files changed, 126 insertions(+), 6 deletions(-) >>> >>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c >>> index a43fc5af973d..107bd921f104 100644 >>> --- a/arch/x86/net/bpf_jit_comp.c >>> +++ b/arch/x86/net/bpf_jit_comp.c >>> @@ -2185,6 +2185,37 @@ st: if (is_imm8(insn->off)) >>> case BPF_JMP | BPF_CALL: { >>> u8 *ip = image + addrs[i - 1]; >>> + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && >>> + imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { >>> + /* The default implementation of this kfunc uses >>> + * __arch_get_hw_counter() which is implemented as >>> + * `(u64)rdtsc_ordered() & S64_MAX`. We skip masking >>> + * part because we assume it's not needed in BPF >>> + * use case (two measurements close in time). >>> + * Original code for rdtsc_ordered() uses sequence: >>> + * 'rdtsc; nop; nop; nop' to patch it into >>> + * 'lfence; rdtsc' or 'rdtscp' depending on CPU >>> features. >>> + * JIT uses 'lfence; rdtsc' variant because BPF >>> program >>> + * doesn't care about cookie provided by rdtsp in RCX. >> >> rdtsp -> tdtscp? 
>> >>> + * Save RDX because RDTSC will use EDX:EAX to >>> return u64 >>> + */ >>> + emit_mov_reg(&prog, true, AUX_REG, BPF_REG_3); >>> + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) >>> + EMIT_LFENCE(); >>> + EMIT2(0x0F, 0x31); >>> + >>> + /* shl RDX, 32 */ >>> + maybe_emit_1mod(&prog, BPF_REG_3, true); >>> + EMIT3(0xC1, add_1reg(0xE0, BPF_REG_3), 32); >>> + /* or RAX, RDX */ >>> + maybe_emit_mod(&prog, BPF_REG_0, BPF_REG_3, true); >>> + EMIT2(0x09, add_2reg(0xC0, BPF_REG_0, BPF_REG_3)); >>> + /* restore RDX from R11 */ >>> + emit_mov_reg(&prog, true, BPF_REG_3, AUX_REG); >>> + >>> + break; >>> + } >>> + >>> func = (u8 *) __bpf_call_base + imm32; >>> if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { >>> LOAD_TAIL_CALL_CNT_PTR(stack_depth); >>> @@ -3791,3 +3822,11 @@ u64 bpf_arch_uaddress_limit(void) >>> { >>> return 0; >>> } >>> + >>> +/* x86-64 JIT can inline kfunc */ >>> +bool bpf_jit_inlines_kfunc_call(s32 imm) >>> +{ >>> + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) >>> + return true; >>> + return false; >>> +} >>> diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/ >>> bpf_jit_comp32.c >>> index de0f9e5f9f73..e6097a371b69 100644 >>> --- a/arch/x86/net/bpf_jit_comp32.c >>> +++ b/arch/x86/net/bpf_jit_comp32.c >>> @@ -2094,6 +2094,13 @@ static int do_jit(struct bpf_prog *bpf_prog, >>> int *addrs, u8 *image, >>> if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { >>> int err; >>> + if (imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { >>> + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) >>> + EMIT3(0x0F, 0xAE, 0xE8); >>> + EMIT2(0x0F, 0x31); >>> + break; >>> + } >>> + >>> err = emit_kfunc_call(bpf_prog, >>> image + addrs[i], >>> insn, &prog); >>> @@ -2621,3 +2628,10 @@ bool bpf_jit_supports_kfunc_call(void) >>> { >>> return true; >>> } >>> + >>> +bool bpf_jit_inlines_kfunc_call(s32 imm) >>> +{ >>> + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) >>> + return true; >>> + return false; >>> +} >>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h >>> index 3ace0d6227e3..43a5207a1591 100644 >>> --- a/include/linux/bpf.h >>> +++ b/include/linux/bpf.h >>> @@ -3333,6 +3333,11 @@ void bpf_user_rnd_init_once(void); >>> u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); >>> u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); >>> +/* Inlined kfuncs */ >>> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >>> +u64 bpf_get_cpu_cycles(void); >>> +#endif >>> + >>> #if defined(CONFIG_NET) >>> bool bpf_sock_common_is_valid_access(int off, int size, >>> enum bpf_access_type type, >>> diff --git a/include/linux/filter.h b/include/linux/filter.h >>> index 3a21947f2fd4..9cf57233874f 100644 >>> --- a/include/linux/filter.h >>> +++ b/include/linux/filter.h >>> @@ -1111,6 +1111,7 @@ struct bpf_prog *bpf_int_jit_compile(struct >>> bpf_prog *prog); >>> void bpf_jit_compile(struct bpf_prog *prog); >>> bool bpf_jit_needs_zext(void); >>> bool bpf_jit_inlines_helper_call(s32 imm); >>> +bool bpf_jit_inlines_kfunc_call(s32 imm); >>> bool bpf_jit_supports_subprog_tailcalls(void); >>> bool bpf_jit_supports_percpu_insn(void); >>> bool bpf_jit_supports_kfunc_call(void); >>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c >>> index 14d9288441f2..daa3ab458c8a 100644 >>> --- a/kernel/bpf/core.c >>> +++ b/kernel/bpf/core.c >>> @@ -2965,6 +2965,17 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm) >>> return false; >>> } >>> +/* Return true if the JIT inlines the call to the kfunc >>> corresponding to >>> + * the imm. 
>>> + * >>> + * The verifier will not patch the insn->imm for the call to the >>> helper if >>> + * this returns true. >>> + */ >>> +bool __weak bpf_jit_inlines_kfunc_call(s32 imm) >>> +{ >>> + return false; >>> +} >>> + >>> /* Return TRUE if the JIT backend supports mixing bpf2bpf and >>> tailcalls. */ >>> bool __weak bpf_jit_supports_subprog_tailcalls(void) >>> { >>> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c >>> index 751c150f9e1c..12d40537e57b 100644 >>> --- a/kernel/bpf/helpers.c >>> +++ b/kernel/bpf/helpers.c >>> @@ -23,6 +23,10 @@ >>> #include <linux/btf_ids.h> >>> #include <linux/bpf_mem_alloc.h> >>> #include <linux/kasan.h> >>> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >>> +#include <vdso/datapage.h> >>> +#include <asm/vdso/vsyscall.h> >>> +#endif >>> #include "../../lib/kstrtox.h" >>> @@ -3057,6 +3061,20 @@ __bpf_kfunc int bpf_copy_from_user_str(void >>> *dst, u32 dst__sz, const void __user >>> return ret + 1; >>> } >>> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >>> +__bpf_kfunc u64 bpf_get_cpu_cycles(void) >>> +{ >>> + const struct vdso_data *vd = __arch_get_k_vdso_data(); >>> + >>> + vd = &vd[CS_RAW]; >>> + >>> + /* CS_RAW clock_mode translates to VDSO_CLOCKMODE_TSC on x86 and >>> + * to VDSO_CLOCKMODE_ARCHTIMER on aarch64/risc-v. >>> + */ >>> + return __arch_get_hw_counter(vd->clock_mode, vd); >>> +} >>> +#endif >>> + >>> __bpf_kfunc_end_defs(); >>> BTF_KFUNCS_START(generic_btf_ids) >>> @@ -3149,6 +3167,9 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache) >>> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | >>> KF_SLEEPABLE) >>> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | >>> KF_RET_NULL | KF_SLEEPABLE) >>> BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | >>> KF_SLEEPABLE) >>> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) >>> +BTF_ID_FLAGS(func, bpf_get_cpu_cycles, KF_FASTCALL) >>> +#endif >>> BTF_KFUNCS_END(common_btf_ids) >>> static const struct btf_kfunc_id_set common_kfunc_set = { >> [...] >
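Putting Yonghong's suggestion together: passing a constant clock mode lets the compiler drop the vread_pvclock()/vread_hvclock() branches that pull in the vDSO-only pvclock_page/hvclock_page symbols. A hedged sketch of what the kfunc could look like with that change and the requested comment — one possible shape, not the final patch:

#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
__bpf_kfunc u64 bpf_get_cpu_cycles(void)
{
	const struct vdso_data *vd = __arch_get_k_vdso_data();

	vd = &vd[CS_RAW];

	/* Use a constant clock mode of 1, i.e. VDSO_CLOCKMODE_TSC on x86 and
	 * VDSO_CLOCKMODE_ARCHTIMER on aarch64/risc-v (the first arch-specific
	 * mode in both cases). With the mode known at compile time, the
	 * PARAVIRT_CLOCK/HYPERV_TIMER branches in __arch_get_hw_counter()
	 * are dead-code-eliminated, so vmlinux never references the
	 * vDSO-only pvclock_page/hvclock_page symbols.
	 */
	return __arch_get_hw_counter(1, vd);
}
#endif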
Hi Vadim,

kernel test robot noticed the following build errors:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Vadim-Fedorenko/bpf-add-bpf_get_cpu_cycles-kfunc/20241117-002106
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20241115194841.2108634-2-vadfed%40meta.com
patch subject: [PATCH bpf-next v6 1/4] bpf: add bpf_get_cpu_cycles kfunc
config: x86_64-randconfig-075-20241117 (https://download.01.org/0day-ci/archive/20241118/202411180657.c0eoNysc-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241118/202411180657.c0eoNysc-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202411180657.c0eoNysc-lkp@intel.com/

All errors (new ones prefixed by >>):

ld: vmlinux.o: in function `vread_pvclock':
>> arch/x86/include/asm/vdso/gettimeofday.h:198: undefined reference to `pvclock_page'
>> ld: arch/x86/include/asm/vdso/gettimeofday.h:198: undefined reference to `pvclock_page'
ld: vmlinux.o: in function `pvclock_read_begin':
>> arch/x86/include/asm/pvclock.h:25: undefined reference to `pvclock_page'
ld: vmlinux.o: in function `vread_pvclock':
arch/x86/include/asm/vdso/gettimeofday.h:228: undefined reference to `pvclock_page'
ld: vmlinux.o: in function `__pvclock_read_cycles':
arch/x86/include/asm/pvclock.h:86: undefined reference to `pvclock_page'
ld: vmlinux.o:arch/x86/include/asm/pvclock.h:88: more undefined references to `pvclock_page' follow
ld: .tmp_vmlinux1: hidden symbol `pvclock_page' isn't defined
ld: final link failed: bad value


vim +198 arch/x86/include/asm/vdso/gettimeofday.h

7ac8707479886c Vincenzo Frascino 2019-06-21  195  
7ac8707479886c Vincenzo Frascino 2019-06-21  196  #ifdef CONFIG_PARAVIRT_CLOCK
7ac8707479886c Vincenzo Frascino 2019-06-21  197  static u64 vread_pvclock(void)
7ac8707479886c Vincenzo Frascino 2019-06-21 @198  {
ecf9db3d1f1a8f Andy Lutomirski   2019-06-22  199  	const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti;
7ac8707479886c Vincenzo Frascino 2019-06-21  200  	u32 version;
7ac8707479886c Vincenzo Frascino 2019-06-21  201  	u64 ret;
7ac8707479886c Vincenzo Frascino 2019-06-21  202  
7ac8707479886c Vincenzo Frascino 2019-06-21  203  	/*
7ac8707479886c Vincenzo Frascino 2019-06-21  204  	 * Note: The kernel and hypervisor must guarantee that cpu ID
7ac8707479886c Vincenzo Frascino 2019-06-21  205  	 * number maps 1:1 to per-CPU pvclock time info.
7ac8707479886c Vincenzo Frascino 2019-06-21  206  	 *
7ac8707479886c Vincenzo Frascino 2019-06-21  207  	 * Because the hypervisor is entirely unaware of guest userspace
7ac8707479886c Vincenzo Frascino 2019-06-21  208  	 * preemption, it cannot guarantee that per-CPU pvclock time
7ac8707479886c Vincenzo Frascino 2019-06-21  209  	 * info is updated if the underlying CPU changes or that that
7ac8707479886c Vincenzo Frascino 2019-06-21  210  	 * version is increased whenever underlying CPU changes.
7ac8707479886c Vincenzo Frascino 2019-06-21  211  	 *
7ac8707479886c Vincenzo Frascino 2019-06-21  212  	 * On KVM, we are guaranteed that pvti updates for any vCPU are
7ac8707479886c Vincenzo Frascino 2019-06-21  213  	 * atomic as seen by *all* vCPUs. This is an even stronger
7ac8707479886c Vincenzo Frascino 2019-06-21  214  	 * guarantee than we get with a normal seqlock.
7ac8707479886c Vincenzo Frascino 2019-06-21  215  	 *
7ac8707479886c Vincenzo Frascino 2019-06-21  216  	 * On Xen, we don't appear to have that guarantee, but Xen still
7ac8707479886c Vincenzo Frascino 2019-06-21  217  	 * supplies a valid seqlock using the version field.
7ac8707479886c Vincenzo Frascino 2019-06-21  218  	 *
7ac8707479886c Vincenzo Frascino 2019-06-21  219  	 * We only do pvclock vdso timing at all if
7ac8707479886c Vincenzo Frascino 2019-06-21  220  	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
7ac8707479886c Vincenzo Frascino 2019-06-21  221  	 * mean that all vCPUs have matching pvti and that the TSC is
7ac8707479886c Vincenzo Frascino 2019-06-21  222  	 * synced, so we can just look at vCPU 0's pvti.
7ac8707479886c Vincenzo Frascino 2019-06-21  223  	 */
7ac8707479886c Vincenzo Frascino 2019-06-21  224  
7ac8707479886c Vincenzo Frascino 2019-06-21  225  	do {
7ac8707479886c Vincenzo Frascino 2019-06-21  226  		version = pvclock_read_begin(pvti);
7ac8707479886c Vincenzo Frascino 2019-06-21  227  
7ac8707479886c Vincenzo Frascino 2019-06-21  228  		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
7ac8707479886c Vincenzo Frascino 2019-06-21  229  			return U64_MAX;
7ac8707479886c Vincenzo Frascino 2019-06-21  230  
7ac8707479886c Vincenzo Frascino 2019-06-21  231  		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
7ac8707479886c Vincenzo Frascino 2019-06-21  232  	} while (pvclock_read_retry(pvti, version));
7ac8707479886c Vincenzo Frascino 2019-06-21  233  
77750f78b0b324 Peter Zijlstra    2023-05-19  234  	return ret & S64_MAX;
7ac8707479886c Vincenzo Frascino 2019-06-21  235  }
7ac8707479886c Vincenzo Frascino 2019-06-21  236  #endif
7ac8707479886c Vincenzo Frascino 2019-06-21  237  
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a43fc5af973d..107bd921f104 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2185,6 +2185,37 @@ st: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: { u8 *ip = image + addrs[i - 1]; + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && + imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { + /* The default implementation of this kfunc uses + * __arch_get_hw_counter() which is implemented as + * `(u64)rdtsc_ordered() & S64_MAX`. We skip masking + * part because we assume it's not needed in BPF + * use case (two measurements close in time). + * Original code for rdtsc_ordered() uses sequence: + * 'rdtsc; nop; nop; nop' to patch it into + * 'lfence; rdtsc' or 'rdtscp' depending on CPU features. + * JIT uses 'lfence; rdtsc' variant because BPF program + * doesn't care about cookie provided by rdtsp in RCX. + * Save RDX because RDTSC will use EDX:EAX to return u64 + */ + emit_mov_reg(&prog, true, AUX_REG, BPF_REG_3); + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) + EMIT_LFENCE(); + EMIT2(0x0F, 0x31); + + /* shl RDX, 32 */ + maybe_emit_1mod(&prog, BPF_REG_3, true); + EMIT3(0xC1, add_1reg(0xE0, BPF_REG_3), 32); + /* or RAX, RDX */ + maybe_emit_mod(&prog, BPF_REG_0, BPF_REG_3, true); + EMIT2(0x09, add_2reg(0xC0, BPF_REG_0, BPF_REG_3)); + /* restore RDX from R11 */ + emit_mov_reg(&prog, true, BPF_REG_3, AUX_REG); + + break; + } + func = (u8 *) __bpf_call_base + imm32; if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { LOAD_TAIL_CALL_CNT_PTR(stack_depth); @@ -3791,3 +3822,11 @@ u64 bpf_arch_uaddress_limit(void) { return 0; } + +/* x86-64 JIT can inline kfunc */ +bool bpf_jit_inlines_kfunc_call(s32 imm) +{ + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) + return true; + return false; +} diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index de0f9e5f9f73..e6097a371b69 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2094,6 +2094,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { int err; + if (imm32 == BPF_CALL_IMM(bpf_get_cpu_cycles)) { + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) + EMIT3(0x0F, 0xAE, 0xE8); + EMIT2(0x0F, 0x31); + break; + } + err = emit_kfunc_call(bpf_prog, image + addrs[i], insn, &prog); @@ -2621,3 +2628,10 @@ bool bpf_jit_supports_kfunc_call(void) { return true; } + +bool bpf_jit_inlines_kfunc_call(s32 imm) +{ + if (imm == BPF_CALL_IMM(bpf_get_cpu_cycles)) + return true; + return false; +} diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3ace0d6227e3..43a5207a1591 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -3333,6 +3333,11 @@ void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +/* Inlined kfuncs */ +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) +u64 bpf_get_cpu_cycles(void); +#endif + #if defined(CONFIG_NET) bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, diff --git a/include/linux/filter.h b/include/linux/filter.h index 3a21947f2fd4..9cf57233874f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1111,6 +1111,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_jit_inlines_helper_call(s32 imm); +bool bpf_jit_inlines_kfunc_call(s32 imm); bool 
bpf_jit_supports_subprog_tailcalls(void); bool bpf_jit_supports_percpu_insn(void); bool bpf_jit_supports_kfunc_call(void); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 14d9288441f2..daa3ab458c8a 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2965,6 +2965,17 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm) return false; } +/* Return true if the JIT inlines the call to the kfunc corresponding to + * the imm. + * + * The verifier will not patch the insn->imm for the call to the helper if + * this returns true. + */ +bool __weak bpf_jit_inlines_kfunc_call(s32 imm) +{ + return false; +} + /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */ bool __weak bpf_jit_supports_subprog_tailcalls(void) { diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 751c150f9e1c..12d40537e57b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -23,6 +23,10 @@ #include <linux/btf_ids.h> #include <linux/bpf_mem_alloc.h> #include <linux/kasan.h> +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) +#include <vdso/datapage.h> +#include <asm/vdso/vsyscall.h> +#endif #include "../../lib/kstrtox.h" @@ -3057,6 +3061,20 @@ __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user return ret + 1; } +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) +__bpf_kfunc u64 bpf_get_cpu_cycles(void) +{ + const struct vdso_data *vd = __arch_get_k_vdso_data(); + + vd = &vd[CS_RAW]; + + /* CS_RAW clock_mode translates to VDSO_CLOCKMODE_TSC on x86 and + * to VDSO_CLOCKMODE_ARCHTIMER on aarch64/risc-v. + */ + return __arch_get_hw_counter(vd->clock_mode, vd); +} +#endif + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -3149,6 +3167,9 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache) BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) +#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY) +BTF_ID_FLAGS(func, bpf_get_cpu_cycles, KF_FASTCALL) +#endif BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 09f7fa635f67..a08e05a420e1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16411,6 +16411,24 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm) } } +/* True if fixup_kfunc_call() replaces calls to kfunc number 'imm', + * replacement patch is presumed to follow bpf_fastcall contract + * (see mark_fastcall_pattern_for_call() below). 
+ */ +static bool verifier_inlines_kfunc_call(struct bpf_verifier_env *env, s32 imm) +{ + const struct bpf_kfunc_desc *desc = find_kfunc_desc(env->prog, imm, 0); + + if (!env->prog->jit_requested) + return false; + + if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || + desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) + return true; + + return false; +} + /* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */ static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta) { @@ -16538,7 +16556,10 @@ static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env, return; clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta); - can_be_inlined = is_fastcall_kfunc_call(&meta); + can_be_inlined = is_fastcall_kfunc_call(&meta) && + (verifier_inlines_kfunc_call(env, call->imm) || + (meta.btf == btf_vmlinux && + bpf_jit_inlines_kfunc_call(call->imm))); } if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS) @@ -20545,6 +20566,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_insn *insn_buf, int insn_idx, int *cnt) { const struct bpf_kfunc_desc *desc; + s32 imm = insn->imm; if (!insn->imm) { verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); @@ -20568,7 +20590,18 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, insn->imm = BPF_CALL_IMM(desc->addr); if (insn->off) return 0; - if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || + if (verifier_inlines_kfunc_call(env, imm)) { + if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || + desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { + insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); + *cnt = 1; + } else { + verbose(env, "verifier internal error: kfunc id %d has no inline code\n", + desc->func_id); + return -EFAULT; + } + + } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; @@ -20629,10 +20662,6 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, node_offset_reg, insn, insn_buf, cnt); - } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || - desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { - insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); - *cnt = 1; } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) { struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };