author     Eduard Zingerman <eddyz87@gmail.com>   2024-10-29 12:39:11 -0700
committer  Alexei Starovoitov <ast@kernel.org>    2024-10-29 19:43:16 -0700
commit     d0b98f6a17a5cb336121302bce0c97eb5fe32d16 (patch)
tree       75478b86bf83bb155f0af17ee0ac6ffaf9fb6dbb /tools/testing/selftests
parent     d7f214aeacb984b9d42da0146e789f595eb09068 (diff)
bpf: disallow 40-bytes extra stack for bpf_fastcall patterns
Hou Tao reported an issue with bpf_fastcall patterns allowing extra
stack space above the MAX_BPF_STACK limit. This extra stack allowance
is not integrated properly with the following verifier parts (a sketch
of the slot arithmetic follows the list):
- the backtracking logic still assumes that the stack can't exceed
MAX_BPF_STACK;
- bpf_verifier_env->scratched_stack_slots assumes only 64 slots are
available.
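To make the 64-slot ceiling concrete: MAX_BPF_STACK is 512 bytes and a
stack slot is 8 bytes, so a u64 bitmap has exactly one bit per slot for
offsets fp-8 through fp-512, and an access at fp-520 maps to slot index
64, one past the last representable bit. A minimal standalone sketch of
the slot arithmetic (stack_slot() is an illustrative helper mirroring
the verifier's spi = (-off - 1) / BPF_REG_SIZE, not kernel code):

#include <stdio.h>

#define BPF_REG_SIZE  8    /* bytes per stack slot */
#define MAX_BPF_STACK 512  /* 64 slots, one bit each in a u64 bitmap */

/* slot index for a stack access at frame-pointer offset off (off < 0) */
static int stack_slot(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	printf("fp-8   -> slot %d\n", stack_slot(-8));   /* 0            */
	printf("fp-512 -> slot %d\n", stack_slot(-512)); /* 63, last bit */
	printf("fp-520 -> slot %d\n", stack_slot(-520)); /* 64, no bit   */
	return 0;
}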
Here is an example of the precision tracking issue (note that stack
slot -8 is tracked as precise instead of -520):
0: (b7) r1 = 42 ; R1_w=42
1: (b7) r2 = 42 ; R2_w=42
2: (7b) *(u64 *)(r10 -512) = r1 ; R1_w=42 R10=fp0 fp-512_w=42
3: (7b) *(u64 *)(r10 -520) = r2 ; R2_w=42 R10=fp0 fp-520_w=42
4: (85) call bpf_get_smp_processor_id#8 ; R0_w=scalar(...)
5: (79) r2 = *(u64 *)(r10 -520) ; R2_w=42 R10=fp0 fp-520_w=42
6: (79) r1 = *(u64 *)(r10 -512) ; R1_w=42 R10=fp0 fp-512_w=42
7: (bf) r3 = r10 ; R3_w=fp0 R10=fp0
8: (0f) r3 += r2
mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx -1
mark_precise: frame0: regs=r2 stack= before 7: (bf) r3 = r10
mark_precise: frame0: regs=r2 stack= before 6: (79) r1 = *(u64 *)(r10 -512)
mark_precise: frame0: regs=r2 stack= before 5: (79) r2 = *(u64 *)(r10 -520)
mark_precise: frame0: regs= stack=-8 before 4: (85) call bpf_get_smp_processor_id#8
mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -520) = r2
mark_precise: frame0: regs=r2 stack= before 2: (7b) *(u64 *)(r10 -512) = r1
mark_precise: frame0: regs=r2 stack= before 1: (b7) r2 = 42
9: R2_w=42 R3_w=fp42
9: (95) exit
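The mis-tracked slot above is not arbitrary. Assuming a u64 stack mask
updated as mask |= 1ULL << spi, a spill at fp-520 yields spi == 64:
shifting a 64-bit value by 64 is undefined behaviour in C, and x86
shift instructions truncate the count to 6 bits, so the precision mark
lands on bit 0, i.e. fp-8, exactly as the log shows. A standalone
sketch of that wraparound (the mod-64 truncation is made explicit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stack_mask = 0;
	int spi = (520 - 1) / 8; /* 64: one past the last valid bit */

	/* 1ULL << 64 is UB; x86 behaves as if the count were spi & 63,
	 * so the mark lands on bit 0, i.e. slot fp-8 */
	stack_mask |= 1ULL << (spi & 63);
	printf("spi=%d mask=%#llx -> fp-8 marked, fp-520 lost\n",
	       spi, (unsigned long long)stack_mask);
	return 0;
}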
This patch disables the additional allowance for the moment.
Also, two test cases are removed:
- bpf_fastcall_max_stack_ok:
it fails without the additional stack allowance;
- bpf_fastcall_max_stack_fail:
this test is no longer necessary: stack size follows the regular
rules (see the schematic below), and pattern invalidation is
checked by other test cases.
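For reference, the regular rule mentioned above is simply the
MAX_BPF_STACK bound with no bpf_fastcall headroom; schematically
(simplified illustration, not the verifier's actual check):

#define MAX_BPF_STACK 512

/* a stack access at fp+off (off < 0) must stay within MAX_BPF_STACK,
 * with no extra 40-byte fastcall allowance */
static int stack_off_ok(int off)
{
	return off < 0 && -off <= MAX_BPF_STACK;
}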
Reported-by: Hou Tao <houtao@huaweicloud.com>
Closes: https://lore.kernel.org/bpf/20241023022752.172005-1-houtao@huaweicloud.com/
Fixes: 5b5f51bff1b6 ("bpf: no_caller_saved_registers attribute for helper calls")
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20241029193911.1575719-1-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/testing/selftests')
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c  55
1 file changed, 0 insertions(+), 55 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index 9da97d2efcd9..5094c288cfd7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -790,61 +790,6 @@ __naked static void cumulative_stack_depth_subprog(void)
 	:: __imm(bpf_get_smp_processor_id) : __clobber_all);
 }
 
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 512")
-__xlated("0: r1 = 42")
-__xlated("1: *(u64 *)(r10 -512) = r1")
-__xlated("2: w0 = ")
-__xlated("3: r0 = &(void __percpu *)(r0)")
-__xlated("4: r0 = *(u32 *)(r0 +0)")
-__xlated("5: exit")
-__success
-__naked int bpf_fastcall_max_stack_ok(void)
-{
-	asm volatile(
-	"r1 = 42;"
-	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_smp_processor_id];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	"exit;"
-	:
-	: __imm_const(max_bpf_stack, MAX_BPF_STACK),
-	  __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
-	  __imm(bpf_get_smp_processor_id)
-	: __clobber_all
-	);
-}
-
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 520")
-__failure
-__naked int bpf_fastcall_max_stack_fail(void)
-{
-	asm volatile(
-	"r1 = 42;"
-	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_smp_processor_id];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	/* call to prandom blocks bpf_fastcall rewrite */
-	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
-	"call %[bpf_get_prandom_u32];"
-	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	"exit;"
-	:
-	: __imm_const(max_bpf_stack, MAX_BPF_STACK),
-	  __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
-	  __imm(bpf_get_smp_processor_id),
-	  __imm(bpf_get_prandom_u32)
-	: __clobber_all
-	);
-}
-
 SEC("cgroup/getsockname_unix")
 __xlated("0: r2 = 1")
 /* bpf_cast_to_kern_ctx is replaced by a single assignment */