
[bpf] bpf: disallow 40-bytes extra stack for bpf_fastcall patterns

Message ID 20241029193911.1575719-1-eddyz87@gmail.com (mailing list archive)
State Accepted
Commit d0b98f6a17a5cb336121302bce0c97eb5fe32d16
Delegated to: BPF
Series [bpf] bpf: disallow 40-bytes extra stack for bpf_fastcall patterns

Checks

Context Check Description
bpf/vmtest-bpf-PR success PR summary
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success Errors and warnings before: 157 (+0) this patch: 157 (+0)
netdev/cc_maintainers warning 9 maintainers not CCed: sdf@fomichev.me shuah@kernel.org kpsingh@kernel.org john.fastabend@gmail.com song@kernel.org haoluo@google.com mykolal@fb.com linux-kselftest@vger.kernel.org jolsa@kernel.org
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 14 this patch: 14
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 83 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-VM_Test-13 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-VM_Test-14 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-VM_Test-41 success Logs for x86_64-llvm-18 / veristat

Commit Message

Eduard Zingerman Oct. 29, 2024, 7:39 p.m. UTC
Hou Tao reported an issue with bpf_fastcall patterns allowing extra
stack space above the MAX_BPF_STACK limit. This extra stack allowance is
not properly integrated with the following verifier parts:
- backtracking logic still assumes that the stack can't exceed
  MAX_BPF_STACK;
- bpf_verifier_env->scratched_stack_slots assumes that only 64 slots
  are available (see the sketch below).
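
To make the second point concrete, here is a minimal standalone sketch
(an editorial illustration, not verifier code; the helper mirrors the
kernel's mark_stack_slot_scratched()). scratched_stack_slots is a u64
bitmask with one bit per 8-byte slot, so the slot at fp-520 has index
64 and no bit to land in. Shifting a u64 by 64 is undefined in C; on
x86 the shift count wraps, so the mark lands on slot 0, i.e. fp-8:

    #include <stdio.h>
    #include <stdint.h>

    #define BPF_REG_SIZE 8

    /* u64 mask: 64 slots x 8 bytes = 512 bytes, exactly MAX_BPF_STACK */
    static uint64_t scratched_stack_slots;

    /* mirrors env->scratched_stack_slots |= 1ULL << spi; the x86-style
     * shift-count wrap is modeled explicitly here
     */
    static void mark_stack_slot_scratched(int spi)
    {
            scratched_stack_slots |= 1ULL << (spi & 63);
    }

    int main(void)
    {
            int off = -520;                      /* access at fp-520 */
            int spi = (-off - 1) / BPF_REG_SIZE; /* slot index 64 */

            mark_stack_slot_scratched(spi);
            /* prints "spi=64 mask=0x1": slot 0 (fp-8) marked instead */
            printf("spi=%d mask=%#llx\n", spi,
                   (unsigned long long)scratched_stack_slots);
            return 0;
    }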

Here is an example of the resulting issue with precision tracking
(note that stack slot -8 is tracked as precise instead of -520):

    0: (b7) r1 = 42                       ; R1_w=42
    1: (b7) r2 = 42                       ; R2_w=42
    2: (7b) *(u64 *)(r10 -512) = r1       ; R1_w=42 R10=fp0 fp-512_w=42
    3: (7b) *(u64 *)(r10 -520) = r2       ; R2_w=42 R10=fp0 fp-520_w=42
    4: (85) call bpf_get_smp_processor_id#8       ; R0_w=scalar(...)
    5: (79) r2 = *(u64 *)(r10 -520)       ; R2_w=42 R10=fp0 fp-520_w=42
    6: (79) r1 = *(u64 *)(r10 -512)       ; R1_w=42 R10=fp0 fp-512_w=42
    7: (bf) r3 = r10                      ; R3_w=fp0 R10=fp0
    8: (0f) r3 += r2
    mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx -1
    mark_precise: frame0: regs=r2 stack= before 7: (bf) r3 = r10
    mark_precise: frame0: regs=r2 stack= before 6: (79) r1 = *(u64 *)(r10 -512)
    mark_precise: frame0: regs=r2 stack= before 5: (79) r2 = *(u64 *)(r10 -520)
    mark_precise: frame0: regs= stack=-8 before 4: (85) call bpf_get_smp_processor_id#8
    mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -520) = r2
    mark_precise: frame0: regs=r2 stack= before 2: (7b) *(u64 *)(r10 -512) = r1
    mark_precise: frame0: regs=r2 stack= before 1: (b7) r2 = 42
    9: R2_w=42 R3_w=fp42
    9: (95) exit
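
To decode the "stack=..." annotations in this log: the precision
tracker prints one offset per set bit of a 64-bit stack mask, bit i
standing for fp-(i+1)*8, so the deepest offset the mask can express is
fp-512 (bit 63). fp-520 would need bit 64, and with the shift-count
wrap sketched above the access is attributed to fp-8 instead. A short
standalone sketch of the decoding (modeled on, but not copied from,
the kernel's log formatting):

    #include <stdio.h>
    #include <stdint.h>

    /* print every stack offset encoded in a 64-bit precision mask */
    static void fmt_stack_mask(uint64_t stack_mask)
    {
            int i;

            printf("stack=");
            for (i = 0; i < 64; i++)
                    if (stack_mask & (1ULL << i))
                            printf("%d ", -(i + 1) * 8);
            printf("\n");
    }

    int main(void)
    {
            fmt_stack_mask(1ULL << 0);  /* "stack=-8", as in the log above */
            fmt_stack_mask(1ULL << 63); /* "stack=-512", deepest expressible */
            return 0;
    }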

This patch disables the additional allowance for the moment.
Two test cases are also removed:
- bpf_fastcall_max_stack_ok:
  it fails without the additional stack allowance (a sketch of such a
  now-rejected program follows this list);
- bpf_fastcall_max_stack_fail:
  no longer necessary; stack size now follows the regular rules, and
  pattern invalidation is covered by other test cases.
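
For illustration, here is the kind of program that is rejected once
the allowance is gone, written in the style of the remaining
selftests. The test name is made up and the expected verifier error
string is an assumption, not captured output:

    SEC("raw_tp")
    __arch_x86_64
    /* assumed to fail with an invalid-stack-access error along the
     * lines of "invalid write to stack R10 off=-520 size=8"
     */
    __failure
    __naked int bpf_fastcall_beyond_max_stack(void)
    {
            asm volatile(
            "r1 = 42;"
            /* spill at MAX_BPF_STACK + 8, i.e. fp-520: no longer
             * allowed, even around a bpf_fastcall-eligible call
             */
            "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
            "call %[bpf_get_smp_processor_id];"
            "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
            "exit;"
            :
            : __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
              __imm(bpf_get_smp_processor_id)
            : __clobber_all
            );
    }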

Reported-by: Hou Tao <houtao@huaweicloud.com>
Closes: https://lore.kernel.org/bpf/20241023022752.172005-1-houtao@huaweicloud.com/
Fixes: 5b5f51bff1b6 ("bpf: no_caller_saved_registers attribute for helper calls")
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 kernel/bpf/verifier.c                         | 14 +----
 .../bpf/progs/verifier_bpf_fastcall.c         | 55 -------------------
 2 files changed, 2 insertions(+), 67 deletions(-)

Comments

Andrii Nakryiko Oct. 29, 2024, 10:17 p.m. UTC | #1
On Tue, Oct 29, 2024 at 12:39 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> Hou Tao reported an issue with bpf_fastcall patterns allowing extra
> stack space above the MAX_BPF_STACK limit. This extra stack allowance is
> not properly integrated with the following verifier parts:
>
> [...]

LGTM

Acked-by: Andrii Nakryiko <andrii@kernel.org>

Hou Tao Oct. 30, 2024, 2:41 a.m. UTC | #2
On 10/30/2024 3:39 AM, Eduard Zingerman wrote:
> Hou Tao reported an issue with bpf_fastcall patterns allowing extra
> stack space above the MAX_BPF_STACK limit. This extra stack allowance is
> not properly integrated with the following verifier parts:
>
> [...]

Tested-by: Hou Tao <houtao1@huawei.com>
patchwork-bot+netdevbpf@kernel.org Oct. 30, 2024, 2:50 a.m. UTC | #3
Hello:

This patch was applied to bpf/bpf.git (master)
by Alexei Starovoitov <ast@kernel.org>:

On Tue, 29 Oct 2024 12:39:11 -0700 you wrote:
> Hou Tao reported an issue with bpf_fastcall patterns allowing extra
> stack space above the MAX_BPF_STACK limit. This extra stack allowance is
> not properly integrated with the following verifier parts:
>
> [...]

Here is the summary with links:
  - [bpf] bpf: disallow 40-bytes extra stack for bpf_fastcall patterns
    https://git.kernel.org/bpf/bpf/c/d0b98f6a17a5

You are awesome, thank you!

Patch

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 587a6c76e564..a494396bef2a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6804,20 +6804,10 @@  static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
                                           struct bpf_func_state *state,
                                           enum bpf_access_type t)
 {
-	struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx];
-	int min_valid_off, max_bpf_stack;
-
-	/* If accessing instruction is a spill/fill from bpf_fastcall pattern,
-	 * add room for all caller saved registers below MAX_BPF_STACK.
-	 * In case if bpf_fastcall rewrite won't happen maximal stack depth
-	 * would be checked by check_max_stack_depth_subprog().
-	 */
-	max_bpf_stack = MAX_BPF_STACK;
-	if (aux->fastcall_pattern)
-		max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE;
+	int min_valid_off;
 
 	if (t == BPF_WRITE || env->allow_uninit_stack)
-		min_valid_off = -max_bpf_stack;
+		min_valid_off = -MAX_BPF_STACK;
 	else
 		min_valid_off = -state->allocated_stack;
 
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index 9da97d2efcd9..5094c288cfd7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -790,61 +790,6 @@  __naked static void cumulative_stack_depth_subprog(void)
 	:: __imm(bpf_get_smp_processor_id) : __clobber_all);
 }
 
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 512")
-__xlated("0: r1 = 42")
-__xlated("1: *(u64 *)(r10 -512) = r1")
-__xlated("2: w0 = ")
-__xlated("3: r0 = &(void __percpu *)(r0)")
-__xlated("4: r0 = *(u32 *)(r0 +0)")
-__xlated("5: exit")
-__success
-__naked int bpf_fastcall_max_stack_ok(void)
-{
-	asm volatile(
-	"r1 = 42;"
-	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_smp_processor_id];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	"exit;"
-	:
-	: __imm_const(max_bpf_stack, MAX_BPF_STACK),
-	  __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
-	  __imm(bpf_get_smp_processor_id)
-	: __clobber_all
-	);
-}
-
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 520")
-__failure
-__naked int bpf_fastcall_max_stack_fail(void)
-{
-	asm volatile(
-	"r1 = 42;"
-	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_smp_processor_id];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	/* call to prandom blocks bpf_fastcall rewrite */
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_prandom_u32];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	"exit;"
-	:
-	: __imm_const(max_bpf_stack, MAX_BPF_STACK),
-	  __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
-	  __imm(bpf_get_smp_processor_id),
-	  __imm(bpf_get_prandom_u32)
-	: __clobber_all
-	);
-}
-
 SEC("cgroup/getsockname_unix")
 __xlated("0: r2 = 1")
 /* bpf_cast_to_kern_ctx is replaced by a single assignment */