Context | Check | Description
bpf/vmtest-bpf-next-PR | success | PR summary
netdev/series_format | success | Posting correctly formatted
netdev/tree_selection | success | Clearly marked for bpf-next, async
netdev/ynl | success | Generated files up to date; no warnings/errors; no diff in generated
netdev/fixes_present | success | Fixes tag not required for -next series
netdev/header_inline | success | No static functions without inline keyword in header files
netdev/build_32bit | success | Errors and warnings before: 194; this patch: 194
netdev/build_tools | success | Errors and warnings before: 26 (+0); this patch: 26 (+0)
netdev/cc_maintainers | warning | 12 maintainers not CCed: mingo@redhat.com sdf@fomichev.me dsahern@kernel.org udknight@gmail.com dave.hansen@linux.intel.com jolsa@kernel.org hpa@zytor.com song@kernel.org haoluo@google.com netdev@vger.kernel.org john.fastabend@gmail.com kpsingh@kernel.org
netdev/build_clang | success | Errors and warnings before: 225; this patch: 225
netdev/verify_signedoff | success | Signed-off-by tag matches author and committer
netdev/deprecated_api | success | None detected
netdev/check_selftest | success | No net selftest shell script
netdev/verify_fixes | success | No Fixes tag
netdev/build_allmodconfig_warn | success | Errors and warnings before: 7022; this patch: 7022
netdev/checkpatch | warning | 15 WARNINGs: line lengths of 81-92 and 95-97 exceed 80 columns
netdev/build_clang_rust | success | No Rust files in patch. Skipping build
netdev/kdoc | success | Errors and warnings before: 18; this patch: 18
netdev/source_inline | fail | Was 0 now: 1
bpf/vmtest-bpf-next-VM_Test-0 | success | Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 | success | Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 | success | Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 | success | Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-24 | success | Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-7 | success | Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 | success | Logs for aarch64-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-13 | success | Logs for s390x-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-15 | success | Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 | success | Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-20 | success | Logs for s390x-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-22 | success | Logs for x86_64-gcc / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-5 | success | Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 | success | Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-10 | success | Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 | success | Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-19 | success | Logs for s390x-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-4 | success | Logs for aarch64-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-18 | success | Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-23 | success | Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 | success | Logs for aarch64-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-31 | success | Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
bpf/vmtest-bpf-next-VM_Test-32 | success | Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
bpf/vmtest-bpf-next-VM_Test-33 | success | Logs for x86_64-llvm-17 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-34 | success | Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 | success | Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-40 | success | Logs for x86_64-llvm-17 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-41 | success | Logs for x86_64-llvm-17 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-42 | success | Logs for x86_64-llvm-18 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-43 | success | Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-44 | success | Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-50 | success | Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-51 | success | Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-25 | success | Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 | success | Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 | success | Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 | success | Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 | success | Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 | success | Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 | success | Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 | success | Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 | success | Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-39 | success | Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-45 | success | Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 | success | Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 | success | Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 | success | Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 | success | Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-46 | success | Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-47 | success | Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-49 | success | Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-48 | success | Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
@@ -15,6 +15,7 @@
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
+#include <asm/timer.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
#include <asm/cfi.h>
@@ -2254,6 +2255,40 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_CALL: {
u8 *ip = image + addrs[i - 1];
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
+ IS_ENABLED(CONFIG_BPF_SYSCALL) &&
+ imm32 == BPF_CALL_IMM(bpf_get_cpu_time_counter) &&
+ cpu_feature_enabled(X86_FEATURE_TSC) &&
+ using_native_sched_clock() && sched_clock_stable()) {
+ /* The default implementation of this kfunc uses
+ * ktime_get_raw_fast_ns(), which is effectively implemented as
+ * `(u64)rdtsc_ordered() & S64_MAX`. The JIT skips the masking
+ * part because we assume it is not needed in the BPF use case
+ * (two measurements close in time).
+ * The original code for rdtsc_ordered() uses the sequence
+ * 'rdtsc; nop; nop; nop' so it can be patched into
+ * 'lfence; rdtsc' or 'rdtscp' depending on CPU features.
+ * The JIT uses the 'lfence; rdtsc' variant because a BPF
+ * program doesn't care about the cookie rdtscp returns in RCX.
+ * Save RDX because RDTSC returns the u64 in EDX:EAX.
+ */
+ emit_mov_reg(&prog, true, AUX_REG, BPF_REG_3);
+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+ EMIT_LFENCE();
+ EMIT2(0x0F, 0x31);
+
+ /* shl RDX, 32 */
+ maybe_emit_1mod(&prog, BPF_REG_3, true);
+ EMIT3(0xC1, add_1reg(0xE0, BPF_REG_3), 32);
+ /* or RAX, RDX */
+ maybe_emit_mod(&prog, BPF_REG_0, BPF_REG_3, true);
+ EMIT2(0x09, add_2reg(0xC0, BPF_REG_0, BPF_REG_3));
+ /* restore RDX from R11 */
+ emit_mov_reg(&prog, true, BPF_REG_3, AUX_REG);
+
+ break;
+ }
+
func = (u8 *) __bpf_call_base + imm32;
if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
LOAD_TAIL_CALL_CNT_PTR(stack_depth);
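For reference, here is a rough C equivalent of the sequence the JIT emits above ('lfence; rdtsc; shl rdx, 32; or rax, rdx'), assuming X86_FEATURE_LFENCE_RDTSC is set; the function name is illustrative and not part of the patch:

/* Sketch: what the inlined code computes, minus the RDX save/restore */
static inline u64 inlined_cpu_time_counter(void)
{
	u32 lo, hi;

	/* lfence orders the rdtsc after earlier loads; rdtsc returns
	 * the timestamp counter in EDX:EAX
	 */
	asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return ((u64)hi << 32) | lo;
}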
@@ -3865,3 +3900,15 @@ bool bpf_jit_supports_timed_may_goto(void)
{
return true;
}
+
+/* The x86-64 JIT can inline some kfunc calls */
+bool bpf_jit_inlines_kfunc_call(s32 imm)
+{
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return false;
+ if (imm == BPF_CALL_IMM(bpf_get_cpu_time_counter) &&
+ cpu_feature_enabled(X86_FEATURE_TSC) &&
+ using_native_sched_clock() && sched_clock_stable())
+ return true;
+ return false;
+}
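On the BPF side a program calls the kfunc like any other; a minimal usage sketch (the SEC name, program body, and bpf_printk output are illustrative assumptions; only bpf_get_cpu_time_counter comes from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* kfunc added by this patch */
extern __u64 bpf_get_cpu_time_counter(void) __ksym;

SEC("tc")
int measure(struct __sk_buff *skb)
{
	__u64 start, delta;

	start = bpf_get_cpu_time_counter();
	/* ... code being measured ... */
	delta = bpf_get_cpu_time_counter() - start;
	bpf_printk("measured %llu counter units", delta);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";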
@@ -16,6 +16,7 @@
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/asm-prototypes.h>
+#include <asm/timer.h>
#include <linux/bpf.h>
/*
@@ -2094,6 +2095,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
int err;
+ if (IS_ENABLED(CONFIG_BPF_SYSCALL) &&
+ imm32 == BPF_CALL_IMM(bpf_get_cpu_time_counter) &&
+ cpu_feature_enabled(X86_FEATURE_TSC) &&
+ using_native_sched_clock() && sched_clock_stable()) {
+ /* The default implementation of this kfunc uses
+ * ktime_get_raw_fast_ns(), which is effectively implemented as
+ * `(u64)rdtsc_ordered() & S64_MAX`. The JIT skips the masking
+ * part because we assume it is not needed in the BPF use case
+ * (two measurements close in time).
+ * The original code for rdtsc_ordered() uses the sequence
+ * 'rdtsc; nop; nop; nop' so it can be patched into
+ * 'lfence; rdtsc' or 'rdtscp' depending on CPU features.
+ * The JIT uses the 'lfence; rdtsc' variant because a BPF
+ * program doesn't care about the cookie rdtscp returns in ECX.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+ EMIT3(0x0F, 0xAE, 0xE8);
+ EMIT2(0x0F, 0x31);
+ break;
+ }
+
err = emit_kfunc_call(bpf_prog,
image + addrs[i],
insn, &prog);
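The raw bytes emitted in this hunk decode as follows (standard x86 encodings):

/* 0F AE E8   lfence   -- order earlier loads before the TSC read
 * 0F 31      rdtsc    -- EDX:EAX := 64-bit timestamp counter
 */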
@@ -2621,3 +2643,14 @@ bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
+
+bool bpf_jit_inlines_kfunc_call(s32 imm)
+{
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return false;
+ if (imm == BPF_CALL_IMM(bpf_get_cpu_time_counter) &&
+ cpu_feature_enabled(X86_FEATURE_TSC) &&
+ using_native_sched_clock() && sched_clock_stable())
+ return true;
+ return false;
+}
@@ -3387,6 +3387,9 @@ void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+/* Inlined kfuncs */
+u64 bpf_get_cpu_time_counter(void);
+
#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -1128,6 +1128,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_jit_inlines_helper_call(s32 imm);
+bool bpf_jit_inlines_kfunc_call(s32 imm);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_percpu_insn(void);
bool bpf_jit_supports_kfunc_call(void);
@@ -3035,6 +3035,17 @@ bool __weak bpf_jit_inlines_helper_call(s32 imm)
return false;
}
+/* Return true if the JIT inlines the call to the kfunc corresponding to
+ * the imm.
+ *
+ * The verifier will not patch insn->imm for the kfunc call if this
+ * returns true.
+ */
+bool __weak bpf_jit_inlines_kfunc_call(s32 imm)
+{
+ return false;
+}
+
/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
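Any other JIT can opt in by providing a strong definition that overrides this weak default; a hypothetical shape (not part of this patch):

/* Hypothetical arch override: report the one kfunc this JIT inlines */
bool bpf_jit_inlines_kfunc_call(s32 imm)
{
	return imm == BPF_CALL_IMM(bpf_get_cpu_time_counter);
}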
@@ -3193,6 +3193,11 @@ __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
local_irq_restore(*flags__irq_flag);
}
+__bpf_kfunc u64 bpf_get_cpu_time_counter(void)
+{
+ return ktime_get_raw_fast_ns();
+}
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
@@ -3293,6 +3298,7 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_local_irq_save)
BTF_ID_FLAGS(func, bpf_local_irq_restore)
+BTF_ID_FLAGS(func, bpf_get_cpu_time_counter, KF_FASTCALL)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
@@ -17035,6 +17035,24 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
}
}
+/* True if fixup_kfunc_call() replaces calls to kfunc number 'imm'.
+ * The replacement patch is presumed to follow the bpf_fastcall contract
+ * (see mark_fastcall_pattern_for_call() below).
+ */
+static bool verifier_inlines_kfunc_call(struct bpf_verifier_env *env, s32 imm)
+{
+ const struct bpf_kfunc_desc *desc = find_kfunc_desc(env->prog, imm, 0);
+
+ if (!env->prog->jit_requested)
+ return false;
+
+ if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+ desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast])
+ return true;
+
+ return false;
+}
+
struct call_summary {
u8 num_params;
bool is_void;
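For context, the bpf_fastcall contract referenced above means the verifier may remove spill/fill pairs around the call site, because the inlined replacement clobbers only R0 and the argument registers; a sketch of the pattern:

/* bpf_fastcall pattern (sketch):
 *
 *   *(u64 *)(r10 - 8) = r1;          // spill, marked removable
 *   call bpf_get_cpu_time_counter;   // inlined by the JIT or verifier
 *   r1 = *(u64 *)(r10 - 8);          // fill, marked removable
 */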
@@ -17077,7 +17095,10 @@ static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call
/* error would be reported later */
return false;
cs->num_params = btf_type_vlen(meta.func_proto);
- cs->fastcall = meta.kfunc_flags & KF_FASTCALL;
+ cs->fastcall = meta.kfunc_flags & KF_FASTCALL &&
+ (verifier_inlines_kfunc_call(env, call->imm) ||
+ (meta.btf == btf_vmlinux &&
+ bpf_jit_inlines_kfunc_call(call->imm)));
cs->is_void = btf_type_is_void(btf_type_by_id(meta.btf, meta.func_proto->type));
return true;
}
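Restating the updated condition: a kfunc call is treated as fastcall only when it carries the KF_FASTCALL flag and something actually inlines it:

/* cs->fastcall holds iff:
 *   KF_FASTCALL is set
 *   AND (the verifier inlines this kfunc
 *        OR (the kfunc is in vmlinux BTF AND the JIT inlines it))
 */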
@@ -21223,6 +21244,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{
const struct bpf_kfunc_desc *desc;
+ s32 imm = insn->imm;
if (!insn->imm) {
verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
@@ -21246,7 +21268,18 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
insn->imm = BPF_CALL_IMM(desc->addr);
if (insn->off)
return 0;
- if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
+ if (verifier_inlines_kfunc_call(env, imm)) {
+ if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+ desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
+ insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+ *cnt = 1;
+ } else {
+ verbose(env, "verifier internal error: kfunc id %d has no inline code\n",
+ desc->func_id);
+ return -EFAULT;
+ }
+ } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
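The single-instruction replacement installed above has this effect on the instruction stream (sketch):

/* before:  r0 = bpf_cast_to_kern_ctx(r1);   // kfunc call
 * after:   r0 = r1;                         // BPF_MOV64_REG(BPF_REG_0, BPF_REG_1)
 */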
@@ -21307,10 +21340,6 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
node_offset_reg, insn, insn_buf, cnt);
- } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
- desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
- insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
- *cnt = 1;
} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };