Context |
Check |
Description |
bpf/vmtest-bpf-next-PR |
success
|
PR summary
|
bpf/vmtest-bpf-next-VM_Test-3 |
success
|
Logs for Validate matrix.py
|
bpf/vmtest-bpf-next-VM_Test-0 |
success
|
Logs for Lint
|
bpf/vmtest-bpf-next-VM_Test-2 |
success
|
Logs for Unittests
|
bpf/vmtest-bpf-next-VM_Test-5 |
success
|
Logs for aarch64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-1 |
success
|
Logs for ShellCheck
|
bpf/vmtest-bpf-next-VM_Test-4 |
success
|
Logs for aarch64-gcc / build / build for aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-17 |
success
|
Logs for s390x-gcc / veristat
|
bpf/vmtest-bpf-next-VM_Test-10 |
success
|
Logs for aarch64-gcc / veristat
|
bpf/vmtest-bpf-next-VM_Test-12 |
success
|
Logs for s390x-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-36 |
success
|
Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
|
bpf/vmtest-bpf-next-VM_Test-11 |
success
|
Logs for s390x-gcc / build / build for s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-20 |
success
|
Logs for x86_64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-28 |
success
|
Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-18 |
success
|
Logs for set-matrix
|
bpf/vmtest-bpf-next-VM_Test-35 |
success
|
Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-42 |
success
|
Logs for x86_64-llvm-18 / veristat
|
bpf/vmtest-bpf-next-VM_Test-34 |
success
|
Logs for x86_64-llvm-17 / veristat
|
bpf/vmtest-bpf-next-VM_Test-19 |
success
|
Logs for x86_64-gcc / build / build for x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-29 |
success
|
Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
|
bpf/vmtest-bpf-next-VM_Test-7 |
success
|
Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-16 |
success
|
Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-6 |
success
|
Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-9 |
success
|
Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-15 |
success
|
Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-14 |
success
|
Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-13 |
success
|
Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-8 |
success
|
Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-33 |
success
|
Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-32 |
success
|
Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-26 |
success
|
Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-31 |
success
|
Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-30 |
success
|
Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-39 |
success
|
Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-23 |
success
|
Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-41 |
success
|
Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-22 |
success
|
Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-38 |
success
|
Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-24 |
success
|
Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-27 |
success
|
Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-40 |
success
|
Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-21 |
success
|
Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-25 |
success
|
Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-37 |
success
|
Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
|
netdev/tree_selection |
success
|
Clearly marked for bpf-next
|
netdev/apply |
fail
|
Patch does not apply to bpf-next-0
|
@@ -15107,7 +15107,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *eq_branch_regs;
struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
- bool is_jmp32;
+ bool is_jmp32, ignore_pred;
int pred = -1;
int err;
@@ -15177,8 +15177,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
+ ignore_pred = !(!get_loop_entry(this_branch) || src_reg->precise ||
+ dst_reg->precise ||
+ (BPF_SRC(insn->code) == BPF_K && insn->imm == 0));
+
pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
- if (pred >= 0) {
+ if (pred >= 0 && !ignore_pred) {
/* If we get here with a dst_reg pointer type it is because
* above is_branch_taken() special cased the 0 comparison.
*/
@@ -15191,6 +15195,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return err;
}
+ if (pred < 0 || ignore_pred) {
+ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+ false);
+ if (!other_branch)
+ return -EFAULT;
+ other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
+ }
+
if (pred == 1) {
/* Only follow the goto, ignore fall-through. If needed, push
* the fall-through branch for simulation under speculative
@@ -15202,8 +15214,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch->frame[this_branch->curframe]);
- *insn_idx += insn->off;
- return 0;
+ if (ignore_pred) {
+ __mark_reg_unknown(env, dst_reg);
+ __mark_reg_unknown(env, src_reg);
+ } else {
+ *insn_idx += insn->off;
+ return 0;
+ }
} else if (pred == 0) {
/* Only follow the fall-through branch, since that's where the
* program will go. If needed, push the goto branch for
@@ -15216,15 +15233,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch->frame[this_branch->curframe]);
- return 0;
+ if (ignore_pred) {
+ __mark_reg_unknown(env, &other_branch_regs[insn->dst_reg]);
+ if (BPF_SRC(insn->code) == BPF_X)
+ __mark_reg_unknown(env, &other_branch_regs[insn->src_reg]);
+ } else {
+ return 0;
+ }
}
- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
- false);
- if (!other_branch)
- return -EFAULT;
- other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
-
if (BPF_SRC(insn->code) == BPF_X) {
err = reg_set_min_max(env,
&other_branch_regs[insn->dst_reg],
@@ -17217,6 +17234,38 @@ static int propagate_precision(struct bpf_verifier_env *env,
return 0;
}
+static void find_precise_reg(struct bpf_reg_state *cur_reg)
+{
+ struct bpf_reg_state *reg;
+
+ reg = cur_reg->parent;
+ while (reg && reg->type == SCALAR_VALUE) {
+ /*
+ * propagate_liveness() might not have happened for this state yet.
+ * Intermediate reg missing LIVE_READ mark is not an issue.
+ */
+ if (reg->precise && (reg->live & REG_LIVE_READ)) {
+ cur_reg->precise = true;
+ break;
+ }
+ reg = reg->parent;
+ }
+}
+
+static void find_precision(struct bpf_verifier_state *cur_state)
+{
+ struct bpf_func_state *state;
+ struct bpf_reg_state *reg;
+
+ if (!get_loop_entry(cur_state))
+ return;
+ bpf_for_each_reg_in_vstate(cur_state, state, reg, ({
+ if (reg->type != SCALAR_VALUE || reg->precise)
+ continue;
+ find_precise_reg(reg);
+ }));
+}
+
static bool states_maybe_looping(struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
@@ -17409,6 +17458,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* => unsafe memory access at 11 would not be caught.
*/
if (is_iter_next_insn(env, insn_idx)) {
+ update_loop_entry(cur, &sl->state);
if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
struct bpf_func_state *cur_frame;
struct bpf_reg_state *iter_state, *iter_reg;
@@ -17426,15 +17476,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
- update_loop_entry(cur, &sl->state);
goto hit;
}
}
goto skip_inf_loop_check;
}
if (is_may_goto_insn_at(env, insn_idx)) {
+ update_loop_entry(cur, &sl->state);
if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
- update_loop_entry(cur, &sl->state);
goto hit;
}
goto skip_inf_loop_check;
@@ -18066,6 +18115,7 @@ static int do_check(struct bpf_verifier_env *env)
return err;
break;
} else {
+ find_precision(env->cur_state);
do_print_state = true;
continue;
}
@@ -18,25 +18,35 @@ void __arena *htab_for_user;
bool skip = false;
int zero = 0;
+char __arena arr1[100000]; /* works */
+char arr2[1000]; /* ok for small sizes */
SEC("syscall")
int arena_htab_llvm(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM)
struct htab __arena *htab;
+ char __arena *arr = arr1;
__u64 i;
htab = bpf_alloc(sizeof(*htab));
cast_kern(htab);
htab_init(htab);
+ cast_kern(arr);
+
/* first run. No old elems in the table */
- for (i = zero; i < 1000; i++)
+ for (i = 0; i < 100000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ arr[i] = i;
+ }
- /* should replace all elems with new ones */
- for (i = zero; i < 1000; i++)
+ /* should replace some elems with new ones */
+ for (i = 0; i < 1000 && can_loop; i++) {
htab_update_elem(htab, i, i);
+ /* Access mem to make the verifier use bounded loop logic */
+ arr2[i] = i;
+ }
cast_user(htab);
htab_for_user = htab;
#else
@@ -188,6 +188,8 @@ int iter_pragma_unroll_loop(const void *ctx)
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ if (!v)
+ break;
}
bpf_iter_num_destroy(&it);
@@ -243,6 +245,8 @@ int iter_multiple_sequential_loops(const void *ctx)
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
+ if (!v)
+ break;
}
bpf_iter_num_destroy(&it);
@@ -291,10 +295,7 @@ int iter_obfuscate_counter(const void *ctx)
{
struct bpf_iter_num it;
int *v, sum = 0;
- /* Make i's initial value unknowable for verifier to prevent it from
- * pruning if/else branch inside the loop body and marking i as precise.
- */
- int i = zero;
+ int i = 0;
MY_PID_GUARD();
@@ -304,15 +305,6 @@ int iter_obfuscate_counter(const void *ctx)
i += 1;
- /* If we initialized i as `int i = 0;` above, verifier would
- * track that i becomes 1 on first iteration after increment
- * above, and here verifier would eagerly prune else branch
- * and mark i as precise, ruining open-coded iterator logic
- * completely, as each next iteration would have a different
- * *precise* value of i, and thus there would be no
- * convergence of state. This would result in reaching maximum
- * instruction limit, no matter what the limit is.
- */
if (i == 1)
x = 123;
else
@@ -318,8 +318,11 @@ int cond_break1(const void *ctx)
unsigned long i;
unsigned int sum = 0;
- for (i = zero; i < ARR_SZ && can_loop; i++)
+ /* i = 0 is ok here, since i is not used in memory access */
+ for (i = 0; i < ARR_SZ && can_loop; i++)
sum += i;
+
+ /* have to use i = zero due to arr[i] where arr is not an arena */
for (i = zero; i < ARR_SZ; i++) {
barrier_var(i);
sum += i + arr[i];
@@ -336,8 +339,8 @@ int cond_break2(const void *ctx)
int i, j;
int sum = 0;
- for (i = zero; i < 1000 && can_loop; i++)
- for (j = zero; j < 1000; j++) {
+ for (i = 0; i < 1000 && can_loop; i++)
+ for (j = 0; j < 1000; j++) {
sum += i + j;
cond_break;
}
@@ -365,7 +368,7 @@ SEC("socket")
__success __retval(1)
int cond_break4(const void *ctx)
{
- int cnt = zero;
+ int cnt = 0;
for (;;) {
/* should eventually break out of the loop */
@@ -378,7 +381,7 @@ int cond_break4(const void *ctx)
static __noinline int static_subprog(void)
{
- int cnt = zero;
+ int cnt = 0;
for (;;) {
cond_break;
@@ -392,7 +395,7 @@ SEC("socket")
__success __retval(1)
int cond_break5(const void *ctx)
{
- int cnt1 = zero, cnt2;
+ int cnt1 = 0, cnt2;
for (;;) {
cond_break;