author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-21 09:27:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-21 09:27:50 -0700
commit     440b65232829fad69947b8de983c13a525cc8871
tree       3cab57fca48b43ba0e11804683b33b71743494c6
parent     1ec6d097897a35dfb55c4c31fc8633cf5be46497
parent     5277d130947ba8c0d54c16eed89eb97f0b6d2e5a
Merge tag 'bpf-next-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
- Introduce '__attribute__((bpf_fastcall))' for helpers and kfuncs, with
corresponding support in LLVM.
It is similar to the existing 'no_caller_saved_registers' attribute in
GCC/LLVM, with a provision for backward compatibility. It allows
compilers to generate more efficient BPF code, assuming the verifier or
JITs will inline or partially inline a helper/kfunc carrying the
attribute. bpf_cast_to_kern_ctx, bpf_rdonly_cast and
bpf_get_smp_processor_id are the first such helpers (a usage sketch
follows the summary list below).
- Harden and extend ELF build ID parsing logic.
When called from a sleepable context, the relevant parts of the ELF file
will be read to find and fetch the .note.gnu.build-id information. Also
harden the logic to avoid TOCTOU, overflow and out-of-bounds problems.
- Improvements and fixes for sched-ext:
- Allow passing BPF iterators as kfunc arguments
- Make the pointer returned from iter_next method trusted
- Fix x86 JIT convergence issue due to growing/shrinking conditional
jumps in variable length encoding
- BPF_LSM related:
- Introduce a few VFS kfuncs and consolidate them in
fs/bpf_fs_kfuncs.c
- Enforce correct range of return values from certain LSM hooks
- Disallow attaching to other LSM hooks
- Prerequisite work for upcoming Qdisc in BPF:
- Allow kptrs in program provided structs
- Support for gen_epilogue in verifier_ops
- Important fixes:
- Fix uprobe multi pid filter check
- Fix bpf_strtol and bpf_strtoul helpers
- Track equal scalars history on per-instruction level
- Fix tailcall hierarchy on x86 and arm64
- Fix signed division overflow to prevent INT_MIN/-1 trap on x86
- Fix kernel stack retrieval in BPF progs attached to tracepoint:syscall
- Selftests:
- Add uprobe bench/stress tool
- Generate file dependencies to drastically improve re-build time
- Match JIT-ed and BPF asm with __xlated/__jited keywords
- Convert older tests to test_progs framework
- Add support for RISC-V
- A few fixes for BPF programs compiled with the GCC-BPF backend
(support for GCC-BPF in BPF CI is ongoing in parallel)
- Add traffic monitor
- Enable cross compile and musl libc
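
(Illustration, not part of the pull request: a minimal sketch of how a BPF
program might use the bpf_fastcall attribute mentioned above. It assumes a
clang build that implements '__attribute__((bpf_fastcall))'; the fallback
macro name and the includes are only illustrative, the kfunc name comes from
the list above.)

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: annotate a kfunc declaration so clang may avoid spilling
 * caller-saved registers around the call site. Older compilers simply do
 * not apply the attribute. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#if __has_attribute(bpf_fastcall)
#define __fastcall_kfunc __attribute__((bpf_fastcall))
#else
#define __fastcall_kfunc
#endif

/* bpf_cast_to_kern_ctx() takes one argument, so under the bpf_fastcall
 * contract it scratches only r0 and r1; the compiler may keep other live
 * values in r2-r5 across the call instead of spilling them to the stack. */
extern void *bpf_cast_to_kern_ctx(void *obj) __ksym __fastcall_kfunc;

char _license[] SEC("license") = "GPL";
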
* tag 'bpf-next-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (260 commits)
btf: require pahole 1.21+ for DEBUG_INFO_BTF with default DWARF version
btf: move pahole check in scripts/link-vmlinux.sh to lib/Kconfig.debug
btf: remove redundant CONFIG_BPF test in scripts/link-vmlinux.sh
bpf: Call the missed kfree() when there is no special field in btf
bpf: Call the missed btf_record_free() when map creation fails
selftests/bpf: Add a test case to write mtu result into .rodata
selftests/bpf: Add a test case to write strtol result into .rodata
selftests/bpf: Rename ARG_PTR_TO_LONG test description
selftests/bpf: Fix ARG_PTR_TO_LONG {half-,}uninitialized test
bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types
bpf: Fix helper writes to read-only maps
bpf: Remove truncation test in bpf_strtol and bpf_strtoul helpers
bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit
selftests/bpf: Add tests for sdiv/smod overflow cases
bpf: Fix a sdiv overflow issue
libbpf: Add bpf_object__token_fd accessor
docs/bpf: Add missing BPF program types to docs
docs/bpf: Add constant values for linkages
bpf: Use fake pt_regs when doing bpf syscall tracepoint tracing
...
249 files changed, 11430 insertions, 3048 deletions
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 257a7e1cdf5d..93060283b6fd 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -368,7 +368,7 @@ No additional type data follow ``btf_type``. * ``info.kind_flag``: 0 * ``info.kind``: BTF_KIND_FUNC * ``info.vlen``: linkage information (BTF_FUNC_STATIC, BTF_FUNC_GLOBAL - or BTF_FUNC_EXTERN) + or BTF_FUNC_EXTERN - see :ref:`BTF_Function_Linkage_Constants`) * ``type``: a BTF_KIND_FUNC_PROTO type No additional type data follow ``btf_type``. @@ -424,9 +424,8 @@ following data:: __u32 linkage; }; -``struct btf_var`` encoding: - * ``linkage``: currently only static variable 0, or globally allocated - variable in ELF sections 1 +``btf_var.linkage`` may take the values: BTF_VAR_STATIC, BTF_VAR_GLOBAL_ALLOCATED or BTF_VAR_GLOBAL_EXTERN - +see :ref:`BTF_Var_Linkage_Constants`. Not all type of global variables are supported by LLVM at this point. The following is currently available: @@ -549,6 +548,38 @@ The ``btf_enum64`` encoding: If the original enum value is signed and the size is less than 8, that value will be sign extended into 8 bytes. +2.3 Constant Values +------------------- + +.. _BTF_Function_Linkage_Constants: + +2.3.1 Function Linkage Constant Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. table:: Function Linkage Values and Meanings + + =================== ===== =========== + kind value description + =================== ===== =========== + ``BTF_FUNC_STATIC`` 0x0 definition of subprogram not visible outside containing compilation unit + ``BTF_FUNC_GLOBAL`` 0x1 definition of subprogram visible outside containing compilation unit + ``BTF_FUNC_EXTERN`` 0x2 declaration of a subprogram whose definition is outside the containing compilation unit + =================== ===== =========== + + +.. _BTF_Var_Linkage_Constants: + +2.3.2 Variable Linkage Constant Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. table:: Variable Linkage Values and Meanings + + ============================ ===== =========== + kind value description + ============================ ===== =========== + ``BTF_VAR_STATIC`` 0x0 definition of global variable not visible outside containing compilation unit + ``BTF_VAR_GLOBAL_ALLOCATED`` 0x1 definition of global variable visible outside containing compilation unit + ``BTF_VAR_GLOBAL_EXTERN`` 0x2 declaration of global variable whose definition is outside the containing compilation unit + ============================ ===== =========== + 3. BTF Kernel API ================= diff --git a/Documentation/bpf/libbpf/program_types.rst b/Documentation/bpf/libbpf/program_types.rst index 63bb88846e50..218b020a2f81 100644 --- a/Documentation/bpf/libbpf/program_types.rst +++ b/Documentation/bpf/libbpf/program_types.rst @@ -121,6 +121,8 @@ described in more detail in the footnotes. 
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_LWT_XMIT`` | | ``lwt_xmit`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ +| ``BPF_PROG_TYPE_NETFILTER`` | | ``netfilter`` | | ++-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_PERF_EVENT`` | | ``perf_event`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE`` | | ``raw_tp.w+`` [#rawtp]_ | | @@ -131,11 +133,23 @@ described in more detail in the footnotes. + + +----------------------------------+-----------+ | | | ``raw_tracepoint+`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` | | +| ``BPF_PROG_TYPE_SCHED_ACT`` | | ``action`` [#tc_legacy]_ | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` | | +| ``BPF_PROG_TYPE_SCHED_CLS`` | | ``classifier`` [#tc_legacy]_ | | + + +----------------------------------+-----------+ -| | | ``tc`` | | +| | | ``tc`` [#tc_legacy]_ | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_NETKIT_PRIMARY`` | ``netkit/primary`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_NETKIT_PEER`` | ``netkit/peer`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_INGRESS`` | ``tc/ingress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_EGRESS`` | ``tc/egress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_INGRESS`` | ``tcx/ingress`` | | ++ +----------------------------------------+----------------------------------+-----------+ +| | ``BPF_TCX_EGRESS`` | ``tcx/egress`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SK_LOOKUP`` | ``BPF_SK_LOOKUP`` | ``sk_lookup`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ @@ -155,7 +169,9 @@ described in more detail in the footnotes. 
+-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SOCK_OPS`` | ``BPF_CGROUP_SOCK_OPS`` | ``sockops`` | | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ -| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` | | +| ``BPF_PROG_TYPE_STRUCT_OPS`` | | ``struct_ops+`` [#struct_ops]_ | | ++ + +----------------------------------+-----------+ +| | | ``struct_ops.s+`` [#struct_ops]_ | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ | ``BPF_PROG_TYPE_SYSCALL`` | | ``syscall`` | Yes | +-------------------------------------------+----------------------------------------+----------------------------------+-----------+ @@ -209,5 +225,11 @@ described in more detail in the footnotes. ``a-zA-Z0-9_.*?``. .. [#lsm] The ``lsm`` attachment format is ``lsm[.s]/<hook>``. .. [#rawtp] The ``raw_tp`` attach format is ``raw_tracepoint[.w]/<tracepoint>``. +.. [#tc_legacy] The ``tc``, ``classifier`` and ``action`` attach types are deprecated, use + ``tcx/*`` instead. +.. [#struct_ops] The ``struct_ops`` attach format supports ``struct_ops[.s]/<name>`` convention, + but ``name`` is ignored and it is recommended to just use plain + ``SEC("struct_ops[.s]")``. The attachments are defined in a struct initializer + that is tagged with ``SEC(".struct_ops[.link]")``. .. [#tp] The ``tracepoint`` attach format is ``tracepoint/<category>/<name>``. .. [#iter] The ``iter`` attach format is ``iter[.s]/<struct-name>``. diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst index 356894399fbf..d23761540002 100644 --- a/Documentation/bpf/verifier.rst +++ b/Documentation/bpf/verifier.rst @@ -418,7 +418,7 @@ The rules for correspondence between registers / stack slots are as follows: linked to the registers and stack slots of the parent state with the same indices. -* For the outer stack frames, only caller saved registers (r6-r9) and stack +* For the outer stack frames, only callee saved registers (r6-r9) and stack slots are linked to the registers and stack slots of the parent state with the same indices. 
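
(Illustration, not part of the diff: the program_types.rst table above now
documents the tcx and netkit ELF section names and marks the legacy tc /
classifier / action spellings as deprecated. A minimal BPF_PROG_TYPE_SCHED_CLS
program using the documented section name could look like the sketch below;
everything apart from the section name and the tcx_action_base return codes
is illustrative.)

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: SCHED_CLS program meant for the tcx attach point, using
 * SEC("tcx/ingress") instead of the deprecated SEC("tc"). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tcx/ingress")
int tcx_ingress_prog(struct __sk_buff *skb)
{
	/* TCX_PASS hands the packet on to the upper networking stack. */
	return TCX_PASS;
}

char _license[] SEC("license") = "GPL";
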
diff --git a/MAINTAINERS b/MAINTAINERS index d9820369aafc..a0a8de589df8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3997,7 +3997,7 @@ F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml F: drivers/iio/imu/bmi323/ BPF JIT for ARC -M: Shahab Vahedi <shahab@synopsys.com> +M: Shahab Vahedi <list+bpf@vahedi.org> L: bpf@vger.kernel.org S: Maintained F: arch/arc/net/ @@ -4164,6 +4164,7 @@ F: include/uapi/linux/btf* F: include/uapi/linux/filter.h F: kernel/bpf/ F: kernel/trace/bpf_trace.c +F: lib/buildid.c F: lib/test_bpf.c F: net/bpf/ F: net/core/filter.c @@ -4284,6 +4285,7 @@ L: bpf@vger.kernel.org S: Maintained F: kernel/bpf/stackmap.c F: kernel/trace/bpf_trace.c +F: lib/buildid.c BROADCOM ASP 2.0 ETHERNET DRIVER M: Justin Chen <justin.chen@broadcom.com> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dd0bb069df4b..8bbd0b20136a 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -26,9 +26,8 @@ #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) -#define TCALL_CNT (MAX_BPF_JIT_REG + 2) +#define TCCNT_PTR (MAX_BPF_JIT_REG + 2) #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) -#define FP_BOTTOM (MAX_BPF_JIT_REG + 4) #define ARENA_VM_START (MAX_BPF_JIT_REG + 5) #define check_imm(bits, imm) do { \ @@ -63,11 +62,10 @@ static const int bpf2a64[] = { [TMP_REG_1] = A64_R(10), [TMP_REG_2] = A64_R(11), [TMP_REG_3] = A64_R(12), - /* tail_call_cnt */ - [TCALL_CNT] = A64_R(26), + /* tail_call_cnt_ptr */ + [TCCNT_PTR] = A64_R(26), /* temporary register for blinding constants */ [BPF_REG_AX] = A64_R(9), - [FP_BOTTOM] = A64_R(27), /* callee saved register for kern_vm_start address */ [ARENA_VM_START] = A64_R(28), }; @@ -78,11 +76,15 @@ struct jit_ctx { int epilogue_offset; int *offset; int exentry_idx; + int nr_used_callee_reg; + u8 used_callee_reg[8]; /* r6~r9, fp, arena_vm_start */ __le32 *image; __le32 *ro_image; u32 stack_size; - int fpb_offset; u64 user_vm_start; + u64 arena_vm_start; + bool fp_used; + bool write; }; struct bpf_plt { @@ -96,7 +98,7 @@ struct bpf_plt { static inline void emit(const u32 insn, struct jit_ctx *ctx) { - if (ctx->image != NULL) + if (ctx->image != NULL && ctx->write) ctx->image[ctx->idx] = cpu_to_le32(insn); ctx->idx++; @@ -181,14 +183,47 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, } } -static inline void emit_call(u64 target, struct jit_ctx *ctx) +static bool should_emit_indirect_call(long target, const struct jit_ctx *ctx) +{ + long offset; + + /* when ctx->ro_image is not allocated or the target is unknown, + * emit indirect call + */ + if (!ctx->ro_image || !target) + return true; + + offset = target - (long)&ctx->ro_image[ctx->idx]; + return offset < -SZ_128M || offset >= SZ_128M; +} + +static void emit_direct_call(u64 target, struct jit_ctx *ctx) { - u8 tmp = bpf2a64[TMP_REG_1]; + u32 insn; + unsigned long pc; + + pc = (unsigned long)&ctx->ro_image[ctx->idx]; + insn = aarch64_insn_gen_branch_imm(pc, target, AARCH64_INSN_BRANCH_LINK); + emit(insn, ctx); +} + +static void emit_indirect_call(u64 target, struct jit_ctx *ctx) +{ + u8 tmp; + tmp = bpf2a64[TMP_REG_1]; emit_addr_mov_i64(tmp, target, ctx); emit(A64_BLR(tmp), ctx); } +static void emit_call(u64 target, struct jit_ctx *ctx) +{ + if (should_emit_indirect_call((long)target, ctx)) + emit_indirect_call(target, ctx); + else + emit_direct_call(target, ctx); +} + static inline int bpf2a64_offset(int bpf_insn, int off, const struct jit_ctx *ctx) { @@ -273,21 +308,143 @@ static bool is_lsi_offset(int 
offset, int scale) return true; } -/* generated prologue: +/* generated main prog prologue: * bti c // if CONFIG_ARM64_BTI_KERNEL * mov x9, lr * nop // POKE_OFFSET * paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL * stp x29, lr, [sp, #-16]! * mov x29, sp - * stp x19, x20, [sp, #-16]! - * stp x21, x22, [sp, #-16]! - * stp x25, x26, [sp, #-16]! - * stp x27, x28, [sp, #-16]! - * mov x25, sp - * mov tcc, #0 + * stp xzr, x26, [sp, #-16]! + * mov x26, sp * // PROLOGUE_OFFSET + * // save callee-saved registers */ +static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx) +{ + const bool is_main_prog = !bpf_is_subprog(ctx->prog); + const u8 ptr = bpf2a64[TCCNT_PTR]; + + if (is_main_prog) { + /* Initialize tail_call_cnt. */ + emit(A64_PUSH(A64_ZR, ptr, A64_SP), ctx); + emit(A64_MOV(1, ptr, A64_SP), ctx); + } else + emit(A64_PUSH(ptr, ptr, A64_SP), ctx); +} + +static void find_used_callee_regs(struct jit_ctx *ctx) +{ + int i; + const struct bpf_prog *prog = ctx->prog; + const struct bpf_insn *insn = &prog->insnsi[0]; + int reg_used = 0; + + for (i = 0; i < prog->len; i++, insn++) { + if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) + reg_used |= 1; + + if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) + reg_used |= 2; + + if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) + reg_used |= 4; + + if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) + reg_used |= 8; + + if (insn->dst_reg == BPF_REG_FP || insn->src_reg == BPF_REG_FP) { + ctx->fp_used = true; + reg_used |= 16; + } + } + + i = 0; + if (reg_used & 1) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_6]; + + if (reg_used & 2) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_7]; + + if (reg_used & 4) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_8]; + + if (reg_used & 8) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9]; + + if (reg_used & 16) + ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP]; + + if (ctx->arena_vm_start) + ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START]; + + ctx->nr_used_callee_reg = i; +} + +/* Save callee-saved registers */ +static void push_callee_regs(struct jit_ctx *ctx) +{ + int reg1, reg2, i; + + /* + * Program acting as exception boundary should save all ARM64 + * Callee-saved registers as the exception callback needs to recover + * all ARM64 Callee-saved registers in its epilogue. + */ + if (ctx->prog->aux->exception_boundary) { + emit(A64_PUSH(A64_R(19), A64_R(20), A64_SP), ctx); + emit(A64_PUSH(A64_R(21), A64_R(22), A64_SP), ctx); + emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx); + emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx); + emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx); + } else { + find_used_callee_regs(ctx); + for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) { + reg1 = ctx->used_callee_reg[i]; + reg2 = ctx->used_callee_reg[i + 1]; + emit(A64_PUSH(reg1, reg2, A64_SP), ctx); + } + if (i < ctx->nr_used_callee_reg) { + reg1 = ctx->used_callee_reg[i]; + /* keep SP 16-byte aligned */ + emit(A64_PUSH(reg1, A64_ZR, A64_SP), ctx); + } + } +} + +/* Restore callee-saved registers */ +static void pop_callee_regs(struct jit_ctx *ctx) +{ + struct bpf_prog_aux *aux = ctx->prog->aux; + int reg1, reg2, i; + + /* + * Program acting as exception boundary pushes R23 and R24 in addition + * to BPF callee-saved registers. Exception callback uses the boundary + * program's stack frame, so recover these extra registers in the above + * two cases. 
+ */ + if (aux->exception_boundary || aux->exception_cb) { + emit(A64_POP(A64_R(27), A64_R(28), A64_SP), ctx); + emit(A64_POP(A64_R(25), A64_R(26), A64_SP), ctx); + emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx); + emit(A64_POP(A64_R(21), A64_R(22), A64_SP), ctx); + emit(A64_POP(A64_R(19), A64_R(20), A64_SP), ctx); + } else { + i = ctx->nr_used_callee_reg - 1; + if (ctx->nr_used_callee_reg % 2 != 0) { + reg1 = ctx->used_callee_reg[i]; + emit(A64_POP(reg1, A64_ZR, A64_SP), ctx); + i--; + } + while (i > 0) { + reg1 = ctx->used_callee_reg[i - 1]; + reg2 = ctx->used_callee_reg[i]; + emit(A64_POP(reg1, reg2, A64_SP), ctx); + i -= 2; + } + } +} #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0) #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0) @@ -296,20 +453,13 @@ static bool is_lsi_offset(int offset, int scale) #define POKE_OFFSET (BTI_INSNS + 1) /* Tail call offset to jump into */ -#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8) +#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 4) -static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, - bool is_exception_cb, u64 arena_vm_start) +static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf) { const struct bpf_prog *prog = ctx->prog; const bool is_main_prog = !bpf_is_subprog(prog); - const u8 r6 = bpf2a64[BPF_REG_6]; - const u8 r7 = bpf2a64[BPF_REG_7]; - const u8 r8 = bpf2a64[BPF_REG_8]; - const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 tcc = bpf2a64[TCALL_CNT]; - const u8 fpb = bpf2a64[FP_BOTTOM]; const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const int idx0 = ctx->idx; int cur_offset; @@ -348,19 +498,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, emit(A64_MOV(1, A64_R(9), A64_LR), ctx); emit(A64_NOP, ctx); - if (!is_exception_cb) { + if (!prog->aux->exception_cb) { /* Sign lr */ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) emit(A64_PACIASP, ctx); + /* Save FP and LR registers to stay align with ARM64 AAPCS */ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); - /* Save callee-saved registers */ - emit(A64_PUSH(r6, r7, A64_SP), ctx); - emit(A64_PUSH(r8, r9, A64_SP), ctx); - emit(A64_PUSH(fp, tcc, A64_SP), ctx); - emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx); + prepare_bpf_tail_call_cnt(ctx); + + if (!ebpf_from_cbpf && is_main_prog) { + cur_offset = ctx->idx - idx0; + if (cur_offset != PROLOGUE_OFFSET) { + pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", + cur_offset, PROLOGUE_OFFSET); + return -1; + } + /* BTI landing pad for the tail call, done with a BR */ + emit_bti(A64_BTI_J, ctx); + } + push_callee_regs(ctx); } else { /* * Exception callback receives FP of Main Program as third @@ -372,58 +531,28 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, * callee-saved registers. The exception callback will not push * anything and re-use the main program's stack. 
* - * 10 registers are on the stack + * 12 registers are on the stack */ - emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx); + emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx); } - /* Set up BPF prog stack base register */ - emit(A64_MOV(1, fp, A64_SP), ctx); - - if (!ebpf_from_cbpf && is_main_prog) { - /* Initialize tail_call_cnt */ - emit(A64_MOVZ(1, tcc, 0, 0), ctx); - - cur_offset = ctx->idx - idx0; - if (cur_offset != PROLOGUE_OFFSET) { - pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", - cur_offset, PROLOGUE_OFFSET); - return -1; - } - - /* BTI landing pad for the tail call, done with a BR */ - emit_bti(A64_BTI_J, ctx); - } - - /* - * Program acting as exception boundary should save all ARM64 - * Callee-saved registers as the exception callback needs to recover - * all ARM64 Callee-saved registers in its epilogue. - */ - if (prog->aux->exception_boundary) { - /* - * As we are pushing two more registers, BPF_FP should be moved - * 16 bytes - */ - emit(A64_SUB_I(1, fp, fp, 16), ctx); - emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx); - } - - emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx); + if (ctx->fp_used) + /* Set up BPF prog stack base register */ + emit(A64_MOV(1, fp, A64_SP), ctx); /* Stack must be multiples of 16B */ ctx->stack_size = round_up(prog->aux->stack_depth, 16); /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + if (ctx->stack_size) + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - if (arena_vm_start) - emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx); + if (ctx->arena_vm_start) + emit_a64_mov_i64(arena_vm_base, ctx->arena_vm_start, ctx); return 0; } -static int out_offset = -1; /* initialized on the first pass of build_body() */ static int emit_bpf_tail_call(struct jit_ctx *ctx) { /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ @@ -432,11 +561,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const u8 tmp = bpf2a64[TMP_REG_1]; const u8 prg = bpf2a64[TMP_REG_2]; - const u8 tcc = bpf2a64[TCALL_CNT]; - const int idx0 = ctx->idx; -#define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) + const u8 tcc = bpf2a64[TMP_REG_3]; + const u8 ptr = bpf2a64[TCCNT_PTR]; size_t off; + __le32 *branch1 = NULL; + __le32 *branch2 = NULL; + __le32 *branch3 = NULL; /* if (index >= array->map.max_entries) * goto out; @@ -446,16 +576,20 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR32(tmp, r2, tmp), ctx); emit(A64_MOV(0, r3, r3), ctx); emit(A64_CMP(0, r3, tmp), ctx); - emit(A64_B_(A64_COND_CS, jmp_offset), ctx); + branch1 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); /* - * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) + * if ((*tail_call_cnt_ptr) >= MAX_TAIL_CALL_CNT) * goto out; - * tail_call_cnt++; */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(A64_LDR64I(tcc, ptr, 0), ctx); emit(A64_CMP(1, tcc, tmp), ctx); - emit(A64_B_(A64_COND_CS, jmp_offset), ctx); + branch2 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); + + /* (*tail_call_cnt_ptr)++; */ emit(A64_ADD_I(1, tcc, tcc, 1), ctx); /* prog = array->ptrs[index]; @@ -467,27 +601,37 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_ADD(1, tmp, r2, tmp), ctx); emit(A64_LSL(1, prg, r3, 3), ctx); emit(A64_LDR64(prg, tmp, prg), ctx); - emit(A64_CBZ(1, prg, jmp_offset), ctx); + branch3 = ctx->image + ctx->idx; + emit(A64_NOP, ctx); + + /* Update tail_call_cnt if the slot is populated. 
*/ + emit(A64_STR64I(tcc, ptr, 0), ctx); + + /* restore SP */ + if (ctx->stack_size) + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + + pop_callee_regs(ctx); /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); - emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); emit(A64_BR(tmp), ctx); - /* out: */ - if (out_offset == -1) - out_offset = cur_offset; - if (cur_offset != out_offset) { - pr_err_once("tail_call out_offset = %d, expected %d!\n", - cur_offset, out_offset); - return -1; + if (ctx->image) { + off = &ctx->image[ctx->idx] - branch1; + *branch1 = cpu_to_le32(A64_B_(A64_COND_CS, off)); + + off = &ctx->image[ctx->idx] - branch2; + *branch2 = cpu_to_le32(A64_B_(A64_COND_CS, off)); + + off = &ctx->image[ctx->idx] - branch3; + *branch3 = cpu_to_le32(A64_CBZ(1, prg, off)); } + return 0; -#undef cur_offset -#undef jmp_offset } #ifdef CONFIG_ARM64_LSE_ATOMICS @@ -713,36 +857,18 @@ static void build_plt(struct jit_ctx *ctx) plt->target = (u64)&dummy_tramp; } -static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb) +static void build_epilogue(struct jit_ctx *ctx) { const u8 r0 = bpf2a64[BPF_REG_0]; - const u8 r6 = bpf2a64[BPF_REG_6]; - const u8 r7 = bpf2a64[BPF_REG_7]; - const u8 r8 = bpf2a64[BPF_REG_8]; - const u8 r9 = bpf2a64[BPF_REG_9]; - const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 fpb = bpf2a64[FP_BOTTOM]; + const u8 ptr = bpf2a64[TCCNT_PTR]; /* We're done with BPF stack */ - emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); + if (ctx->stack_size) + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - /* - * Program acting as exception boundary pushes R23 and R24 in addition - * to BPF callee-saved registers. Exception callback uses the boundary - * program's stack frame, so recover these extra registers in the above - * two cases. 
- */ - if (ctx->prog->aux->exception_boundary || is_exception_cb) - emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx); + pop_callee_regs(ctx); - /* Restore x27 and x28 */ - emit(A64_POP(fpb, A64_R(28), A64_SP), ctx); - /* Restore fs (x25) and x26 */ - emit(A64_POP(fp, A64_R(26), A64_SP), ctx); - - /* Restore callee-saved register */ - emit(A64_POP(r8, r9, A64_SP), ctx); - emit(A64_POP(r6, r7, A64_SP), ctx); + emit(A64_POP(A64_ZR, ptr, A64_SP), ctx); /* Restore FP/LR registers */ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); @@ -862,7 +988,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, const u8 tmp = bpf2a64[TMP_REG_1]; const u8 tmp2 = bpf2a64[TMP_REG_2]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 fpb = bpf2a64[FP_BOTTOM]; const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const s16 off = insn->off; const s32 imm = insn->imm; @@ -1314,9 +1439,9 @@ emit_cond_jmp: emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx); src = tmp2; } - if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - src_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (src == fp) { + src_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { src_adj = src; off_adj = off; @@ -1407,9 +1532,9 @@ emit_cond_jmp: emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); dst = tmp2; } - if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - dst_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (dst == fp) { + dst_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { dst_adj = dst; off_adj = off; @@ -1469,9 +1594,9 @@ emit_cond_jmp: emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); dst = tmp2; } - if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) { - dst_adj = fpb; - off_adj = off + ctx->fpb_offset; + if (dst == fp) { + dst_adj = A64_SP; + off_adj = off + ctx->stack_size; } else { dst_adj = dst; off_adj = off; @@ -1540,79 +1665,6 @@ emit_cond_jmp: return 0; } -/* - * Return 0 if FP may change at runtime, otherwise find the minimum negative - * offset to FP, converts it to positive number, and align down to 8 bytes. 
- */ -static int find_fpb_offset(struct bpf_prog *prog) -{ - int i; - int offset = 0; - - for (i = 0; i < prog->len; i++) { - const struct bpf_insn *insn = &prog->insnsi[i]; - const u8 class = BPF_CLASS(insn->code); - const u8 mode = BPF_MODE(insn->code); - const u8 src = insn->src_reg; - const u8 dst = insn->dst_reg; - const s32 imm = insn->imm; - const s16 off = insn->off; - - switch (class) { - case BPF_STX: - case BPF_ST: - /* fp holds atomic operation result */ - if (class == BPF_STX && mode == BPF_ATOMIC && - ((imm == BPF_XCHG || - imm == (BPF_FETCH | BPF_ADD) || - imm == (BPF_FETCH | BPF_AND) || - imm == (BPF_FETCH | BPF_XOR) || - imm == (BPF_FETCH | BPF_OR)) && - src == BPF_REG_FP)) - return 0; - - if (mode == BPF_MEM && dst == BPF_REG_FP && - off < offset) - offset = insn->off; - break; - - case BPF_JMP32: - case BPF_JMP: - break; - - case BPF_LDX: - case BPF_LD: - /* fp holds load result */ - if (dst == BPF_REG_FP) - return 0; - - if (class == BPF_LDX && mode == BPF_MEM && - src == BPF_REG_FP && off < offset) - offset = off; - break; - - case BPF_ALU: - case BPF_ALU64: - default: - /* fp holds ALU result */ - if (dst == BPF_REG_FP) - return 0; - } - } - - if (offset < 0) { - /* - * safely be converted to a positive 'int', since insn->off - * is 's16' - */ - offset = -offset; - /* align down to 8 bytes */ - offset = ALIGN_DOWN(offset, 8); - } - - return offset; -} - static int build_body(struct jit_ctx *ctx, bool extra_pass) { const struct bpf_prog *prog = ctx->prog; @@ -1631,13 +1683,11 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) const struct bpf_insn *insn = &prog->insnsi[i]; int ret; - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; ret = build_insn(insn, ctx, extra_pass); if (ret > 0) { i++; - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; continue; } if (ret) @@ -1648,8 +1698,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) * the last element with the offset after the last * instruction (end of program) */ - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; + ctx->offset[i] = ctx->idx; return 0; } @@ -1701,9 +1750,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; - u64 arena_vm_start; u8 *image_ptr; u8 *ro_image_ptr; + int body_idx; + int exentry_idx; if (!prog->jit_requested) return orig_prog; @@ -1719,7 +1769,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog = tmp; } - arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); jit_data = prog->aux->jit_data; if (!jit_data) { jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); @@ -1749,17 +1798,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - ctx.fpb_offset = find_fpb_offset(prog); ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena); + ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); - /* - * 1. Initial fake pass to compute ctx->idx and ctx->offset. + /* Pass 1: Estimate the maximum image size. * * BPF line info needs ctx->offset[i] to be the offset of * instruction[i] in jited image, so build prologue first. 
*/ - if (build_prologue(&ctx, was_classic, prog->aux->exception_cb, - arena_vm_start)) { + if (build_prologue(&ctx, was_classic)) { prog = orig_prog; goto out_off; } @@ -1770,14 +1817,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } ctx.epilogue_offset = ctx.idx; - build_epilogue(&ctx, prog->aux->exception_cb); + build_epilogue(&ctx); build_plt(&ctx); extable_align = __alignof__(struct exception_table_entry); extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry); - /* Now we know the actual image size. */ + /* Now we know the maximum image size. */ prog_size = sizeof(u32) * ctx.idx; /* also allocate space for plt target */ extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align); @@ -1790,7 +1837,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - /* 2. Now, the actual pass. */ + /* Pass 2: Determine jited position and result for each instruction */ /* * Use the image(RW) for writing the JITed instructions. But also save @@ -1806,30 +1853,56 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) skip_init_ctx: ctx.idx = 0; ctx.exentry_idx = 0; + ctx.write = true; + + build_prologue(&ctx, was_classic); - build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start); + /* Record exentry_idx and body_idx before first build_body */ + exentry_idx = ctx.exentry_idx; + body_idx = ctx.idx; + /* Dont write body instructions to memory for now */ + ctx.write = false; if (build_body(&ctx, extra_pass)) { prog = orig_prog; goto out_free_hdr; } - build_epilogue(&ctx, prog->aux->exception_cb); + ctx.epilogue_offset = ctx.idx; + ctx.exentry_idx = exentry_idx; + ctx.idx = body_idx; + ctx.write = true; + + /* Pass 3: Adjust jump offset and write final image */ + if (build_body(&ctx, extra_pass) || + WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) { + prog = orig_prog; + goto out_free_hdr; + } + + build_epilogue(&ctx); build_plt(&ctx); - /* 3. Extra pass to validate JITed code. */ + /* Extra pass to validate JITed code. */ if (validate_ctx(&ctx)) { prog = orig_prog; goto out_free_hdr; } + /* update the real prog size */ + prog_size = sizeof(u32) * ctx.idx; + /* And we're done. */ if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, prog_size, 2, ctx.image); if (!prog->is_func || extra_pass) { - if (extra_pass && ctx.idx != jit_data->ctx.idx) { - pr_err_once("multi-func JIT bug %d != %d\n", + /* The jited image may shrink since the jited result for + * BPF_CALL to subprog may be changed from indirect call + * to direct call. + */ + if (extra_pass && ctx.idx > jit_data->ctx.idx) { + pr_err_once("multi-func JIT bug %d > %d\n", ctx.idx, jit_data->ctx.idx); prog->bpf_func = NULL; prog->jited = 0; @@ -2300,6 +2373,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, .image = image, .ro_image = ro_image, .idx = 0, + .write = true, }; nregs = btf_func_model_nregs(m); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d25d81c8ecc0..06b080b61aa5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -64,6 +64,56 @@ static bool is_imm8(int value) return value <= 127 && value >= -128; } +/* + * Let us limit the positive offset to be <= 123. + * This is to ensure eventual jit convergence For the following patterns: + * ... + * pass4, final_proglen=4391: + * ... + * 20e: 48 85 ff test rdi,rdi + * 211: 74 7d je 0x290 + * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] + * ... 
+ * 289: 48 85 ff test rdi,rdi + * 28c: 74 17 je 0x2a5 + * 28e: e9 7f ff ff ff jmp 0x212 + * 293: bf 03 00 00 00 mov edi,0x3 + * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125) + * and insn at 0x28e is 5-byte jmp insn with offset -129. + * + * pass5, final_proglen=4392: + * ... + * 20e: 48 85 ff test rdi,rdi + * 211: 0f 84 80 00 00 00 je 0x297 + * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] + * ... + * 28d: 48 85 ff test rdi,rdi + * 290: 74 1a je 0x2ac + * 292: eb 84 jmp 0x218 + * 294: bf 03 00 00 00 mov edi,0x3 + * Note that insn at 0x211 is 6-byte cond jump insn now since its offset + * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80). + * At the same time, insn at 0x292 is a 2-byte insn since its offset is + * -124. + * + * pass6 will repeat the same code as in pass4 and this will prevent + * eventual convergence. + * + * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes) + * cycle in the above. In the above example je offset <= 0x7c should work. + * + * For other cases, je <-> je needs offset <= 0x7b to avoid no convergence + * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c should + * avoid no convergence issue. + * + * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn + * to maximum 123 (0x7b). This way, the jit pass can eventually converge. + */ +static bool is_imm8_jmp_offset(int value) +{ + return value <= 123 && value >= -128; +} + static bool is_simm32(s64 value) { return value == (s64)(s32)value; @@ -273,7 +323,7 @@ struct jit_context { /* Number of bytes emit_patch() needs to generate instructions */ #define X86_PATCH_SIZE 5 /* Number of bytes that will be skipped on tailcall */ -#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) +#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) static void push_r12(u8 **pprog) { @@ -403,6 +453,37 @@ static void emit_cfi(u8 **pprog, u32 hash) *pprog = prog; } +static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) +{ + u8 *prog = *pprog; + + if (!is_subprog) { + /* cmp rax, MAX_TAIL_CALL_CNT */ + EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); + EMIT2(X86_JA, 6); /* ja 6 */ + /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. + * case1: entry of main prog. + * case2: tail callee of main prog. + */ + EMIT1(0x50); /* push rax */ + /* Make rax as tail_call_cnt_ptr. */ + EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ + EMIT2(0xEB, 1); /* jmp 1 */ + /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. + * case: tail callee of subprog. + */ + EMIT1(0x50); /* push rax */ + /* push tail_call_cnt_ptr */ + EMIT1(0x50); /* push rax */ + } else { /* is_subprog */ + /* rax is tail_call_cnt_ptr. */ + EMIT1(0x50); /* push rax */ + EMIT1(0x50); /* push rax */ + } + + *pprog = prog; +} + /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes @@ -424,10 +505,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, /* When it's the entry of the whole tailcall context, * zeroing rax means initialising tail_call_cnt. */ - EMIT2(0x31, 0xC0); /* xor eax, eax */ + EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ else /* Keep the same instruction layout. 
*/ - EMIT2(0x66, 0x90); /* nop2 */ + emit_nops(&prog, 3); /* nop3 */ } /* Exception callback receives FP as third parameter */ if (is_exception_cb) { @@ -453,7 +534,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, if (stack_depth) EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); if (tail_call_reachable) - EMIT1(0x50); /* push rax */ + emit_prologue_tail_call(&prog, is_subprog); *pprog = prog; } @@ -589,13 +670,15 @@ static void emit_return(u8 **pprog, u8 *ip) *pprog = prog; } +#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) + /* * Generate the following code: * * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... * if (index >= array->map.max_entries) * goto out; - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; * prog = array->ptrs[index]; * if (prog == NULL) @@ -608,7 +691,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, u32 stack_depth, u8 *ip, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset; @@ -630,16 +713,14 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, EMIT2(X86_JBE, offset); /* jbe out */ /* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ @@ -654,6 +735,9 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JE, offset); /* je out */ + /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -663,6 +747,11 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, pop_r12(&prog); } + /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. 
+ */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ @@ -691,21 +780,19 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, bool *callee_regs_used, u32 stack_depth, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset; /* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_direct_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ poke->tailcall_bypass = ip + (prog - start); poke->adj_off = X86_TAIL_CALL_OFFSET; @@ -715,6 +802,9 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, poke->tailcall_bypass); + /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -724,6 +814,11 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, pop_r12(&prog); } + /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. 
+ */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); @@ -1311,9 +1406,11 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) -/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ -#define RESTORE_TAIL_CALL_CNT(stack) \ - EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) +#define __LOAD_TCC_PTR(off) \ + EMIT3_off32(0x48, 0x8B, 0x85, off) +/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ +#define LOAD_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) @@ -2031,7 +2128,7 @@ populate_extable: func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { - RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); + LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); ip += 7; } if (!imm32) @@ -2184,7 +2281,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */ return -EFAULT; } jmp_offset = addrs[i + insn->off] - addrs[i]; - if (is_imm8(jmp_offset)) { + if (is_imm8_jmp_offset(jmp_offset)) { if (jmp_padding) { /* To keep the jmp_offset valid, the extra bytes are * padded before the jump insn, so we subtract the @@ -2266,7 +2363,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */ break; } emit_jmp: - if (is_imm8(jmp_offset)) { + if (is_imm8_jmp_offset(jmp_offset)) { if (jmp_padding) { /* To avoid breaking jmp_offset, the extra bytes * are padded before the actual jmp insn, so @@ -2706,6 +2803,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, return 0; } +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ +#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(-round_up(stack, 8) - 8) + /* Example: * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); * its 'struct btf_func_model' will be nr_args=2 @@ -2826,7 +2927,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im * [ ... ] * [ stack_arg2 ] * RBP - arg_stack_off [ stack_arg1 ] - * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX + * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX */ /* room for return value of orig_call or fentry prog */ @@ -2955,10 +3056,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im save_args(m, &prog, arg_stack_off, true); if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before calling the original function, restore the - * tail_call_cnt from stack to rax. + /* Before calling the original function, load the + * tail_call_cnt_ptr from stack to rax. */ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); } if (flags & BPF_TRAMP_F_ORIG_STACK) { @@ -3017,10 +3118,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im goto cleanup; } } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before running the original function, restore the - * tail_call_cnt from stack to rax. + /* Before running the original function, load the + * tail_call_cnt_ptr from stack to rax. 
*/ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); } /* restore return value of orig_call or fentry prog back into RAX */ diff --git a/fs/Makefile b/fs/Makefile index 6ecc9b0a53f2..61679fd587b7 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_VBOXSF_FS) += vboxsf/ obj-$(CONFIG_ZONEFS_FS) += zonefs/ +obj-$(CONFIG_BPF_LSM) += bpf_fs_kfuncs.o diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c new file mode 100644 index 000000000000..3fe9f59ef867 --- /dev/null +++ b/fs/bpf_fs_kfuncs.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google LLC. */ + +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/btf_ids.h> +#include <linux/dcache.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/mm.h> +#include <linux/xattr.h> + +__bpf_kfunc_start_defs(); + +/** + * bpf_get_task_exe_file - get a reference on the exe_file struct file member of + * the mm_struct that is nested within the supplied + * task_struct + * @task: task_struct of which the nested mm_struct exe_file member to get a + * reference on + * + * Get a reference on the exe_file struct file member field of the mm_struct + * nested within the supplied *task*. The referenced file pointer acquired by + * this BPF kfunc must be released using bpf_put_file(). Failing to call + * bpf_put_file() on the returned referenced struct file pointer that has been + * acquired by this BPF kfunc will result in the BPF program being rejected by + * the BPF verifier. + * + * This BPF kfunc may only be called from BPF LSM programs. + * + * Internally, this BPF kfunc leans on get_task_exe_file(), such that calling + * bpf_get_task_exe_file() would be analogous to calling get_task_exe_file() + * directly in kernel context. + * + * Return: A referenced struct file pointer to the exe_file member of the + * mm_struct that is nested within the supplied *task*. On error, NULL is + * returned. + */ +__bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task) +{ + return get_task_exe_file(task); +} + +/** + * bpf_put_file - put a reference on the supplied file + * @file: file to put a reference on + * + * Put a reference on the supplied *file*. Only referenced file pointers may be + * passed to this BPF kfunc. Attempting to pass an unreferenced file pointer, or + * any other arbitrary pointer for that matter, will result in the BPF program + * being rejected by the BPF verifier. + * + * This BPF kfunc may only be called from BPF LSM programs. + */ +__bpf_kfunc void bpf_put_file(struct file *file) +{ + fput(file); +} + +/** + * bpf_path_d_path - resolve the pathname for the supplied path + * @path: path to resolve the pathname for + * @buf: buffer to return the resolved pathname in + * @buf__sz: length of the supplied buffer + * + * Resolve the pathname for the supplied *path* and store it in *buf*. This BPF + * kfunc is the safer variant of the legacy bpf_d_path() helper and should be + * used in place of bpf_d_path() whenever possible. It enforces KF_TRUSTED_ARGS + * semantics, meaning that the supplied *path* must itself hold a valid + * reference, or else the BPF program will be outright rejected by the BPF + * verifier. + * + * This BPF kfunc may only be called from BPF LSM programs. + * + * Return: A positive integer corresponding to the length of the resolved + * pathname in *buf*, including the NUL termination character. 
On error, a + * negative integer is returned. + */ +__bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) +{ + int len; + char *ret; + + if (!buf__sz) + return -EINVAL; + + ret = d_path(path, buf, buf__sz); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + len = buf + buf__sz - ret; + memmove(buf, ret, len); + return len; +} + +/** + * bpf_get_dentry_xattr - get xattr of a dentry + * @dentry: dentry to get xattr from + * @name__str: name of the xattr + * @value_p: output buffer of the xattr value + * + * Get xattr *name__str* of *dentry* and store the output in *value_ptr*. + * + * For security reasons, only *name__str* with prefix "user." is allowed. + * + * Return: 0 on success, a negative value on error. + */ +__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str, + struct bpf_dynptr *value_p) +{ + struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p; + struct inode *inode = d_inode(dentry); + u32 value_len; + void *value; + int ret; + + if (WARN_ON(!inode)) + return -EINVAL; + + if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + return -EPERM; + + value_len = __bpf_dynptr_size(value_ptr); + value = __bpf_dynptr_data_rw(value_ptr, value_len); + if (!value) + return -EINVAL; + + ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ); + if (ret) + return ret; + return __vfs_getxattr(dentry, inode, name__str, value, value_len); +} + +/** + * bpf_get_file_xattr - get xattr of a file + * @file: file to get xattr from + * @name__str: name of the xattr + * @value_p: output buffer of the xattr value + * + * Get xattr *name__str* of *file* and store the output in *value_ptr*. + * + * For security reasons, only *name__str* with prefix "user." is allowed. + * + * Return: 0 on success, a negative value on error. + */ +__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str, + struct bpf_dynptr *value_p) +{ + struct dentry *dentry; + + dentry = file_dentry(file); + return bpf_get_dentry_xattr(dentry, name__str, value_p); +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(bpf_fs_kfunc_set_ids) +BTF_ID_FLAGS(func, bpf_get_task_exe_file, + KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +BTF_KFUNCS_END(bpf_fs_kfunc_set_ids) + +static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id) +{ + if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) || + prog->type == BPF_PROG_TYPE_LSM) + return 0; + return -EACCES; +} + +static const struct btf_kfunc_id_set bpf_fs_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_fs_kfunc_set_ids, + .filter = bpf_fs_kfuncs_filter, +}; + +static int __init bpf_fs_kfuncs_init(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set); +} + +late_initcall(bpf_fs_kfuncs_init); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3b94ec161e8c..0c3893c47171 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -294,6 +294,7 @@ struct bpf_map { * same prog type, JITed flag and xdp_has_frags flag. 
*/ struct { + const struct btf_type *attach_func_proto; spinlock_t lock; enum bpf_prog_type type; bool jited; @@ -694,6 +695,11 @@ enum bpf_type_flag { /* DYNPTR points to xdp_buff */ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), + /* Memory must be aligned on some architectures, used in combination with + * MEM_FIXED_SIZE. + */ + MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -731,8 +737,6 @@ enum bpf_arg_type { ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ @@ -743,7 +747,7 @@ enum bpf_arg_type { ARG_PTR_TO_STACK, /* pointer to stack */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ - ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ + ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */ __BPF_ARG_TYPE_MAX, @@ -807,6 +811,12 @@ struct bpf_func_proto { bool gpl_only; bool pkt_access; bool might_sleep; + /* set to true if helper follows contract for llvm + * attribute bpf_fastcall: + * - void functions do not scratch r0 + * - functions taking N arguments scratch only registers r1-rN + */ + bool allow_fastcall; enum bpf_return_type ret_type; union { struct { @@ -919,6 +929,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); */ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; + bool is_ldsx; union { int ctx_field_size; struct { @@ -927,6 +938,7 @@ struct bpf_insn_access_aux { }; }; struct bpf_verifier_log *log; /* for verbose logs */ + bool is_retval; /* is accessing function return value ? 
*/ }; static inline void @@ -965,6 +977,8 @@ struct bpf_verifier_ops { struct bpf_insn_access_aux *info); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); + int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog, + s16 ctx_stack_off); int (*gen_ld_abs)(const struct bpf_insn *orig, struct bpf_insn *insn_buf); u32 (*convert_ctx_access)(enum bpf_access_type type, @@ -1795,6 +1809,7 @@ struct bpf_struct_ops_common_value { #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); +int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value); int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, @@ -1851,6 +1866,10 @@ static inline void bpf_module_put(const void *data, struct module *owner) { module_put(owner); } +static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) +{ + return -ENOTSUPP; +} static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value) @@ -3184,7 +3203,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_stack_sleepable_proto; extern const struct bpf_func_proto bpf_get_task_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto; extern const struct bpf_func_proto bpf_get_stackid_proto_pe; extern const struct bpf_func_proto bpf_get_stack_proto_pe; extern const struct bpf_func_proto bpf_sock_map_update_proto; @@ -3192,6 +3213,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; +extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto; extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; extern const struct bpf_func_proto bpf_msg_redirect_map_proto; extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 1de7ece5d36d..aefcd6564251 100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/bpf.h> +#include <linux/bpf_verifier.h> #include <linux/lsm_hooks.h> #ifdef CONFIG_BPF_LSM @@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode); void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func); +int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range); #else /* !CONFIG_BPF_LSM */ static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) @@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, { } +static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7b776dae36e5..4513372c5bc8 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -23,6 +23,8 @@ * 
(in the "-8,-16,...,-512" form) */ #define TMP_STR_BUF_LEN 320 +/* Patch buffer size */ +#define INSN_BUF_SIZE 32 /* Liveness marks, used for registers and spilled-regs (in stack slots). * Read marks propagate upwards until they find a write mark; they record that @@ -371,6 +373,10 @@ struct bpf_jmp_history_entry { u32 prev_idx : 22; /* special flags, e.g., whether insn is doing register stack spill/load */ u32 flags : 10; + /* additional registers that need precision tracking when this + * jump is backtracked, vector of six 10-bit records + */ + u64 linked_regs; }; /* Maximum number of register states that can exist at once */ @@ -572,6 +578,14 @@ struct bpf_insn_aux_data { bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */ u8 alu_state; /* used in combination with alu_limit */ + /* true if STX or LDX instruction is a part of a spill/fill + * pattern for a bpf_fastcall call. + */ + u8 fastcall_pattern:1; + /* for CALL instructions, a number of spill/fill pairs in the + * bpf_fastcall pattern. + */ + u8 fastcall_spills_num:3; /* below fields are initialized once */ unsigned int orig_idx; /* original instruction index */ @@ -641,6 +655,10 @@ struct bpf_subprog_info { u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. stack depth used by this function */ u16 stack_extra; + /* offsets in range [stack_depth .. fastcall_stack_off) + * are used for bpf_fastcall spills and fills. + */ + s16 fastcall_stack_off; bool has_tail_call: 1; bool tail_call_reachable: 1; bool has_ld_abs: 1; @@ -648,6 +666,8 @@ struct bpf_subprog_info { bool is_async_cb: 1; bool is_exception_cb: 1; bool args_cached: 1; + /* true if bpf_fastcall stack region is used by functions that can't be inlined */ + bool keep_fastcall_stack: 1; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; @@ -762,6 +782,8 @@ struct bpf_verifier_env { * e.g., in reg_type_str() to generate reg_type string */ char tmp_str_buf[TMP_STR_BUF_LEN]; + struct bpf_insn insn_buf[INSN_BUF_SIZE]; + struct bpf_insn epilogue_buf[INSN_BUF_SIZE]; }; static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog) @@ -905,6 +927,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type) type == PTR_TO_XDP_SOCK; } +static inline bool type_may_be_null(u32 type) +{ + return type & PTR_MAYBE_NULL; +} + static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) { env->scratched_regs |= 1U << regno; diff --git a/include/linux/btf.h b/include/linux/btf.h index cffb43133c68..b8a583194c4a 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -580,6 +580,7 @@ bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type); bool btf_types_are_same(const struct btf *btf1, u32 id1, const struct btf *btf2, u32 id2); +int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -654,6 +655,10 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1, { return false; } +static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx) +{ + return -EOPNOTSUPP; +} #endif static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) diff --git a/include/linux/buildid.h b/include/linux/buildid.h 
index 20aa3c2d89f7..014a88c41073 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -7,8 +7,8 @@ #define BUILD_ID_SIZE_MAX 20 struct vm_area_struct; -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size); +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO) diff --git a/include/linux/filter.h b/include/linux/filter.h index 64e1506fefb8..7d7578a8eac1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) +/* Unconditional jumps, gotol pc + imm32 */ + +#define BPF_JMP32_A(IMM) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + /* Relative call */ #define BPF_CALL_REL(TGT) \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e05b39e39c3f..c6cd7c7aeeee 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5519,11 +5519,12 @@ union bpf_attr { * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. * - * void *bpf_kptr_xchg(void *map_value, void *ptr) + * void *bpf_kptr_xchg(void *dst, void *ptr) * Description - * Exchange kptr at pointer *map_value* with *ptr*, and return the - * old value. *ptr* can be NULL, otherwise it must be a referenced - * pointer which will be released when this helper is called. + * Exchange kptr at pointer *dst* with *ptr*, and return the old value. + * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise + * it must be a referenced pointer which will be released when this helper + * is called. * Return * The old value of kptr (which can be NULL). The returned pointer * if not NULL, is a reference which must be released using its @@ -7513,4 +7514,13 @@ struct bpf_iter_num { __u64 __opaque[1]; } __attribute__((aligned(8))); +/* + * Flags to control BPF kfunc behaviour. + * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective + * helper documentation for details.) + */ +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = (1ULL << 0), +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 0291eef9ce92..9b9c151b5c82 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -52,9 +52,3 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o - -# Some source files are common to libbpf. 
-vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf - -$(obj)/%.o: %.c FORCE - $(call if_changed_rule,cc_o_c) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index feabc0193852..79660e3fca4c 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -73,6 +73,9 @@ int array_map_alloc_check(union bpf_attr *attr) /* avoid overflow on round_up(map->value_size) */ if (attr->value_size > INT_MAX) return -E2BIG; + /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ + if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) + return -E2BIG; return 0; } @@ -494,7 +497,7 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key, if (map->btf_key_type_id) seq_printf(m, "%u: ", *(u32 *)key); btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); rcu_read_unlock(); } @@ -515,7 +518,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, seq_printf(m, "\tcpu%d: ", cpu); btf_type_seq_show(map->btf, map->btf_value_type_id, per_cpu_ptr(pptr, cpu), m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } seq_puts(m, "}\n"); @@ -600,7 +603,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) array = container_of(map, struct bpf_array, map); index = info->index & array->index_mask; if (info->percpu_value_buf) - return array->pptrs[index]; + return (void *)(uintptr_t)array->pptrs[index]; return array_map_elem_ptr(array, index); } @@ -619,7 +622,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) array = container_of(map, struct bpf_array, map); index = info->index & array->index_mask; if (info->percpu_value_buf) - return array->pptrs[index]; + return (void *)(uintptr_t)array->pptrs[index]; return array_map_elem_ptr(array, index); } @@ -632,7 +635,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) struct bpf_iter_meta meta; struct bpf_prog *prog; int off = 0, cpu = 0; - void __percpu **pptr; + void __percpu *pptr; u32 size; meta.seq = seq; @@ -648,7 +651,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) if (!info->percpu_value_buf) { ctx.value = v; } else { - pptr = v; + pptr = (void __percpu *)(uintptr_t)v; size = array->elem_size; for_each_possible_cpu(cpu) { copy_map_value_long(map, info->percpu_value_buf + off, @@ -993,7 +996,7 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, prog_id = prog_fd_array_sys_lookup_elem(ptr); btf_type_seq_show(map->btf, map->btf_value_type_id, &prog_id, m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } } diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 08a338e1f231..6292ac5f9bd1 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -11,7 +11,6 @@ #include <linux/lsm_hooks.h> #include <linux/bpf_lsm.h> #include <linux/kallsyms.h> -#include <linux/bpf_verifier.h> #include <net/bpf_sk_storage.h> #include <linux/bpf_local_storage.h> #include <linux/btf_ids.h> @@ -36,6 +35,24 @@ BTF_SET_START(bpf_lsm_hooks) #undef LSM_HOOK BTF_SET_END(bpf_lsm_hooks) +BTF_SET_START(bpf_lsm_disabled_hooks) +BTF_ID(func, bpf_lsm_vm_enough_memory) +BTF_ID(func, bpf_lsm_inode_need_killpriv) +BTF_ID(func, bpf_lsm_inode_getsecurity) +BTF_ID(func, bpf_lsm_inode_listsecurity) +BTF_ID(func, bpf_lsm_inode_copy_up_xattr) +BTF_ID(func, bpf_lsm_getselfattr) +BTF_ID(func, bpf_lsm_getprocattr) +BTF_ID(func, bpf_lsm_setprocattr) +#ifdef CONFIG_KEYS +BTF_ID(func, bpf_lsm_key_getsecurity) +#endif +#ifdef CONFIG_AUDIT +BTF_ID(func, bpf_lsm_audit_rule_match) 
+#endif +BTF_ID(func, bpf_lsm_ismaclabel) +BTF_SET_END(bpf_lsm_disabled_hooks) + /* List of LSM hooks that should operate on 'current' cgroup regardless * of function signature. */ @@ -97,15 +114,24 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, const struct bpf_prog *prog) { + u32 btf_id = prog->aux->attach_btf_id; + const char *func_name = prog->aux->attach_func_name; + if (!prog->gpl_compatible) { bpf_log(vlog, "LSM programs must have a GPL compatible license\n"); return -EINVAL; } - if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) { + if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) { + bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n", + btf_id, func_name); + return -EINVAL; + } + + if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) { bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n", - prog->aux->attach_btf_id, prog->aux->attach_func_name); + btf_id, func_name); return -EINVAL; } @@ -390,3 +416,36 @@ const struct bpf_verifier_ops lsm_verifier_ops = { .get_func_proto = bpf_lsm_func_proto, .is_valid_access = btf_ctx_access, }; + +/* hooks return 0 or 1 */ +BTF_SET_START(bool_lsm_hooks) +#ifdef CONFIG_SECURITY_NETWORK_XFRM +BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match) +#endif +#ifdef CONFIG_AUDIT +BTF_ID(func, bpf_lsm_audit_rule_known) +#endif +BTF_ID(func, bpf_lsm_inode_xattr_skipcap) +BTF_SET_END(bool_lsm_hooks) + +int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *retval_range) +{ + /* no return value range for void hooks */ + if (!prog->aux->attach_func_proto->type) + return -EINVAL; + + if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) { + retval_range->minval = 0; + retval_range->maxval = 1; + } else { + /* All other available LSM hooks, except task_prctl, return 0 + * on success and negative error code on failure. + * To keep things simple, we only allow bpf progs to return 0 + * or negative errno for task_prctl too. + */ + retval_range->minval = -MAX_ERRNO; + retval_range->maxval = 0; + } + return 0; +} diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 0d515ec57aa5..fda3dd2ee984 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -837,7 +837,7 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, btf_type_seq_show(st_map->btf, map->btf_vmlinux_value_type_id, value, m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } kfree(value); @@ -1040,6 +1040,13 @@ void bpf_struct_ops_put(const void *kdata) bpf_map_put(&st_map->map); } +int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) +{ + void *func_ptr = *(void **)(st_ops->cfi_stubs + moff); + + return func_ptr ? 
0 : -ENOTSUPP; +} + static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ba91be08763a..8ae092ae1573 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -212,7 +212,7 @@ enum btf_kfunc_hook { BTF_KFUNC_HOOK_TRACING, BTF_KFUNC_HOOK_SYSCALL, BTF_KFUNC_HOOK_FMODRET, - BTF_KFUNC_HOOK_CGROUP_SKB, + BTF_KFUNC_HOOK_CGROUP, BTF_KFUNC_HOOK_SCHED_ACT, BTF_KFUNC_HOOK_SK_SKB, BTF_KFUNC_HOOK_SOCKET_FILTER, @@ -790,7 +790,7 @@ const char *btf_str_by_offset(const struct btf *btf, u32 offset) return NULL; } -static bool __btf_name_valid(const struct btf *btf, u32 offset) +static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) { /* offset must be valid */ const char *src = btf_str_by_offset(btf, offset); @@ -811,11 +811,6 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset) return !*src; } -static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) -{ - return __btf_name_valid(btf, offset); -} - /* Allow any printable character in DATASEC names */ static bool btf_name_valid_section(const struct btf *btf, u32 offset) { @@ -3761,6 +3756,7 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t, return -EINVAL; } +/* Callers have to ensure the life cycle of btf if it is program BTF */ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field, struct btf_field_info *info) { @@ -3789,7 +3785,6 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field, field->kptr.dtor = NULL; id = info->kptr.type_id; kptr_btf = (struct btf *)btf; - btf_get(kptr_btf); goto found_dtor; } if (id < 0) @@ -4631,7 +4626,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env, } if (!t->name_off || - !__btf_name_valid(env->btf, t->name_off)) { + !btf_name_valid_identifier(env->btf, t->name_off)) { btf_verifier_log_type(env, t, "Invalid name"); return -EINVAL; } @@ -5519,36 +5514,72 @@ static const char *alloc_obj_fields[] = { static struct btf_struct_metas * btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) { - union { - struct btf_id_set set; - struct { - u32 _cnt; - u32 _ids[ARRAY_SIZE(alloc_obj_fields)]; - } _arr; - } aof; struct btf_struct_metas *tab = NULL; + struct btf_id_set *aof; int i, n, id, ret; BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0); BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32)); - memset(&aof, 0, sizeof(aof)); + aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN); + if (!aof) + return ERR_PTR(-ENOMEM); + aof->cnt = 0; + for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) { /* Try to find whether this special type exists in user BTF, and * if so remember its ID so we can easily find it among members * of structs that we iterate in the next loop. 
*/ + struct btf_id_set *new_aof; + id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT); if (id < 0) continue; - aof.set.ids[aof.set.cnt++] = id; + + new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]), + GFP_KERNEL | __GFP_NOWARN); + if (!new_aof) { + ret = -ENOMEM; + goto free_aof; + } + aof = new_aof; + aof->ids[aof->cnt++] = id; } - if (!aof.set.cnt) + n = btf_nr_types(btf); + for (i = 1; i < n; i++) { + /* Try to find if there are kptrs in user BTF and remember their ID */ + struct btf_id_set *new_aof; + struct btf_field_info tmp; + const struct btf_type *t; + + t = btf_type_by_id(btf, i); + if (!t) { + ret = -EINVAL; + goto free_aof; + } + + ret = btf_find_kptr(btf, t, 0, 0, &tmp); + if (ret != BTF_FIELD_FOUND) + continue; + + new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]), + GFP_KERNEL | __GFP_NOWARN); + if (!new_aof) { + ret = -ENOMEM; + goto free_aof; + } + aof = new_aof; + aof->ids[aof->cnt++] = i; + } + + if (!aof->cnt) { + kfree(aof); return NULL; - sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL); + } + sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL); - n = btf_nr_types(btf); for (i = 1; i < n; i++) { struct btf_struct_metas *new_tab; const struct btf_member *member; @@ -5558,17 +5589,13 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) int j, tab_cnt; t = btf_type_by_id(btf, i); - if (!t) { - ret = -EINVAL; - goto free; - } if (!__btf_type_is_struct(t)) continue; cond_resched(); for_each_member(j, t, member) { - if (btf_id_set_contains(&aof.set, member->type)) + if (btf_id_set_contains(aof, member->type)) goto parse; } continue; @@ -5587,7 +5614,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) type = &tab->types[tab->cnt]; type->btf_id = i; record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE | - BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size); + BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT | + BPF_KPTR, t->size); /* The record cannot be unset, treat it as an error if so */ if (IS_ERR_OR_NULL(record)) { ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT; @@ -5596,9 +5624,12 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) type->record = record; tab->cnt++; } + kfree(aof); return tab; free: btf_struct_metas_free(tab); +free_aof: + kfree(aof); return ERR_PTR(ret); } @@ -6245,12 +6276,11 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, btf->kernel_btf = true; snprintf(btf->name, sizeof(btf->name), "%s", module_name); - btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); + btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN); if (!btf->data) { err = -ENOMEM; goto errout; } - memcpy(btf->data, data, data_size); btf->data_size = data_size; err = btf_parse_hdr(env); @@ -6418,8 +6448,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, if (arg == nr_args) { switch (prog->expected_attach_type) { - case BPF_LSM_CGROUP: case BPF_LSM_MAC: + /* mark we are accessing the return value */ + info->is_retval = true; + fallthrough; + case BPF_LSM_CGROUP: case BPF_TRACE_FEXIT: /* When LSM programs are attached to void LSM hooks * they use FEXIT trampolines and when attached to @@ -8054,15 +8087,44 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) BTF_TRACING_TYPE_xxx #undef BTF_TRACING_TYPE +/* Validate well-formedness of iter argument type. + * On success, return positive BTF ID of iter state's STRUCT type. 
+ * On error, negative error is returned. + */ +int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx) +{ + const struct btf_param *arg; + const struct btf_type *t; + const char *name; + int btf_id; + + if (btf_type_vlen(func) <= arg_idx) + return -EINVAL; + + arg = &btf_params(func)[arg_idx]; + t = btf_type_skip_modifiers(btf, arg->type, NULL); + if (!t || !btf_type_is_ptr(t)) + return -EINVAL; + t = btf_type_skip_modifiers(btf, t->type, &btf_id); + if (!t || !__btf_type_is_struct(t)) + return -EINVAL; + + name = btf_name_by_offset(btf, t->name_off); + if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1)) + return -EINVAL; + + return btf_id; +} + static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name, const struct btf_type *func, u32 func_flags) { u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); - const char *name, *sfx, *iter_name; - const struct btf_param *arg; + const char *sfx, *iter_name; const struct btf_type *t; char exp_name[128]; u32 nr_args; + int btf_id; /* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */ if (!flags || (flags & (flags - 1))) @@ -8073,28 +8135,21 @@ static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name, if (nr_args < 1) return -EINVAL; - arg = &btf_params(func)[0]; - t = btf_type_skip_modifiers(btf, arg->type, NULL); - if (!t || !btf_type_is_ptr(t)) - return -EINVAL; - t = btf_type_skip_modifiers(btf, t->type, NULL); - if (!t || !__btf_type_is_struct(t)) - return -EINVAL; - - name = btf_name_by_offset(btf, t->name_off); - if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1)) - return -EINVAL; + btf_id = btf_check_iter_arg(btf, func, 0); + if (btf_id < 0) + return btf_id; /* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to * fit nicely in stack slots */ + t = btf_type_by_id(btf, btf_id); if (t->size == 0 || (t->size % 8)) return -EINVAL; /* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *) * naming pattern */ - iter_name = name + sizeof(ITER_PREFIX) - 1; + iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1; if (flags & KF_ITER_NEW) sfx = "new"; else if (flags & KF_ITER_NEXT) @@ -8309,13 +8364,19 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) case BPF_PROG_TYPE_STRUCT_OPS: return BTF_KFUNC_HOOK_STRUCT_OPS; case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_TRACEPOINT: + case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_LSM: return BTF_KFUNC_HOOK_TRACING; case BPF_PROG_TYPE_SYSCALL: return BTF_KFUNC_HOOK_SYSCALL; case BPF_PROG_TYPE_CGROUP_SKB: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: - return BTF_KFUNC_HOOK_CGROUP_SKB; + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_CGROUP_SYSCTL: + return BTF_KFUNC_HOOK_CGROUP; case BPF_PROG_TYPE_SCHED_ACT: return BTF_KFUNC_HOOK_SCHED_ACT; case BPF_PROG_TYPE_SK_SKB: @@ -8891,6 +8952,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, struct bpf_core_cand_list cands = {}; struct bpf_core_relo_res targ_res; struct bpf_core_spec *specs; + const struct btf_type *type; int err; /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" @@ -8900,6 +8962,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, if (!specs) return -ENOMEM; + type = btf_type_by_id(ctx->btf, relo->type_id); + if (!type) { + bpf_log(ctx->log, "relo #%u: bad type id %u\n", + relo_idx, relo->type_id); + return -EINVAL; + } + if 
(need_cands) { struct bpf_cand_cache *cc; int i; diff --git a/kernel/bpf/btf_iter.c b/kernel/bpf/btf_iter.c new file mode 100644 index 000000000000..0e2c66a52df9 --- /dev/null +++ b/kernel/bpf/btf_iter.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +#include "../../tools/lib/bpf/btf_iter.c" diff --git a/kernel/bpf/btf_relocate.c b/kernel/bpf/btf_relocate.c new file mode 100644 index 000000000000..c12ccbf66507 --- /dev/null +++ b/kernel/bpf/btf_relocate.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +#include "../../tools/lib/bpf/btf_relocate.c" diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 8ba73042a239..e7113d700b87 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -2581,6 +2581,8 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_cgroup_classid: return &bpf_get_cgroup_classid_curr_proto; #endif + case BPF_FUNC_current_task_under_cgroup: + return &bpf_current_task_under_cgroup_proto; default: return NULL; } diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7ee62e38faf0..4e07cc057d6f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2302,6 +2302,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map, { enum bpf_prog_type prog_type = resolve_prog_type(fp); bool ret; + struct bpf_prog_aux *aux = fp->aux; if (fp->kprobe_override) return false; @@ -2311,7 +2312,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map, * in the case of devmap and cpumap). Until device checks * are implemented, prohibit adding dev-bound programs to program maps. */ - if (bpf_prog_is_dev_bound(fp->aux)) + if (bpf_prog_is_dev_bound(aux)) return false; spin_lock(&map->owner.lock); @@ -2321,12 +2322,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map, */ map->owner.type = prog_type; map->owner.jited = fp->jited; - map->owner.xdp_has_frags = fp->aux->xdp_has_frags; + map->owner.xdp_has_frags = aux->xdp_has_frags; + map->owner.attach_func_proto = aux->attach_func_proto; ret = true; } else { ret = map->owner.type == prog_type && map->owner.jited == fp->jited && - map->owner.xdp_has_frags == fp->aux->xdp_has_frags; + map->owner.xdp_has_frags == aux->xdp_has_frags; + if (ret && + map->owner.attach_func_proto != aux->attach_func_proto) { + switch (prog_type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_STRUCT_OPS: + ret = false; + break; + default: + break; + } + } } spin_unlock(&map->owner.lock); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 06115f8728e8..b14b87463ee0 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -462,6 +462,9 @@ static int htab_map_alloc_check(union bpf_attr *attr) * kmalloc-able later in htab_map_update_elem() */ return -E2BIG; + /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */ + if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE) + return -E2BIG; return 0; } @@ -1049,14 +1052,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, pptr = htab_elem_get_ptr(l_new, key_size); } else { /* alloc_percpu zero-fills */ - pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); - if (!pptr) { + void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma); + + if (!ptr) { bpf_mem_cache_free(&htab->ma, l_new); l_new = ERR_PTR(-ENOMEM); goto dec_count; } - l_new->ptr_to_pptr = pptr; - pptr = *(void **)pptr; + l_new->ptr_to_pptr = ptr; + pptr = *(void __percpu **)ptr; } pcpu_init_value(htab, pptr, value, onallcpus); @@ -1586,7 +1590,7 @@ static void 
htab_map_seq_show_elem(struct bpf_map *map, void *key, btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); seq_puts(m, ": "); btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); rcu_read_unlock(); } @@ -2450,7 +2454,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, seq_printf(m, "\tcpu%d: ", cpu); btf_type_seq_show(map->btf, map->btf_value_type_id, per_cpu_ptr(pptr, cpu), m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } seq_puts(m, "}\n"); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b5f0adae8293..1a43d06eab28 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -158,6 +158,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = { .func = bpf_get_smp_processor_id, .gpl_only = false, .ret_type = RET_INTEGER, + .allow_fastcall = true, }; BPF_CALL_0(bpf_get_numa_node_id) @@ -517,16 +518,15 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, } BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, - long *, res) + s64 *, res) { long long _res; int err; + *res = 0; err = __bpf_strtoll(buf, buf_len, flags, &_res); if (err < 0) return err; - if (_res != (long)_res) - return -ERANGE; *res = _res; return err; } @@ -538,23 +538,23 @@ const struct bpf_func_proto bpf_strtol_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(s64), }; BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, - unsigned long *, res) + u64 *, res) { unsigned long long _res; bool is_negative; int err; + *res = 0; err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); if (err < 0) return err; if (is_negative) return -EINVAL; - if (_res != (unsigned long)_res) - return -ERANGE; *res = _res; return err; } @@ -566,7 +566,8 @@ const struct bpf_func_proto bpf_strtoul_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), }; BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) @@ -714,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) if (cpu >= nr_cpu_ids) return (unsigned long)NULL; - return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu); + return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu); } const struct bpf_func_proto bpf_per_cpu_ptr_proto = { @@ -727,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = { BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) { - return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr); + return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr); } const struct bpf_func_proto bpf_this_cpu_ptr_proto = { @@ -1618,9 +1619,9 @@ void bpf_wq_cancel_and_free(void *val) schedule_work(&work->delete_work); } -BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) +BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr) { - unsigned long *kptr = map_value; + unsigned long *kptr = dst; /* This helper may be inlined by verifier. 
*/ return xchg(kptr, (unsigned long)ptr); @@ -1635,7 +1636,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = { .gpl_only = false, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_btf_id = BPF_PTR_POISON, - .arg1_type = ARG_PTR_TO_KPTR, + .arg1_type = ARG_KPTR_XCHG_DEST, .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, .arg2_btf_id = BPF_PTR_POISON, }; @@ -2033,6 +2034,7 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return NULL; } } +EXPORT_SYMBOL_GPL(bpf_base_func_proto); void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock) @@ -2457,6 +2459,29 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, return ret; } +BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct cgroup *cgrp; + + if (unlikely(idx >= array->map.max_entries)) + return -E2BIG; + + cgrp = READ_ONCE(array->ptrs[idx]); + if (unlikely(!cgrp)) + return -EAGAIN; + + return task_under_cgroup_hierarchy(current, cgrp); +} + +const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { + .func = bpf_current_task_under_cgroup, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, +}; + /** * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its @@ -2938,6 +2963,47 @@ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) bpf_mem_free(&bpf_global_ma, kit->bits); } +/** + * bpf_copy_from_user_str() - Copy a string from an unsafe user address + * @dst: Destination address, in kernel space. This buffer must be + * at least @dst__sz bytes long. + * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL. + * @unsafe_ptr__ign: Source address, in user space. + * @flags: The only supported flag is BPF_F_PAD_ZEROS + * + * Copies a NUL-terminated string from userspace to BPF space. If user string is + * too long this will still ensure zero termination in the dst buffer unless + * buffer size is 0. + * + * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and + * memset all of @dst on failure. 
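A minimal BPF-side usage sketch for this kfunc (the attach point, extern declaration, and buffer size below are illustrative placeholders, not part of this diff):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* BPF-side declaration of the kfunc; the header providing it may vary */
	extern int bpf_copy_from_user_str(void *dst, u32 dst__sz,
					  const void *unsafe_ptr__ign, u64 flags) __ksym;

	char buf[64];

	SEC("uprobe.s")	/* sleepable program, so the copy may fault in user pages */
	int BPF_UPROBE(copy_str, const char *user_str)
	{
		/* returns the copied length including the NUL, or a negative error;
		 * with BPF_F_PAD_ZEROS the remainder of buf is zeroed in both cases
		 */
		int n = bpf_copy_from_user_str(buf, sizeof(buf), user_str, BPF_F_PAD_ZEROS);

		if (n < 0)
			bpf_printk("bpf_copy_from_user_str failed: %d", n);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";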
+ */ +__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags) +{ + int ret; + + if (unlikely(flags & ~BPF_F_PAD_ZEROS)) + return -EINVAL; + + if (unlikely(!dst__sz)) + return 0; + + ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1); + if (ret < 0) { + if (flags & BPF_F_PAD_ZEROS) + memset((char *)dst, 0, dst__sz); + + return ret; + } + + if (flags & BPF_F_PAD_ZEROS) + memset((char *)dst + ret, 0, dst__sz - ret); + else + ((char *)dst)[ret] = '\0'; + + return ret + 1; +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -3023,6 +3089,7 @@ BTF_ID_FLAGS(func, bpf_preempt_enable) BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { @@ -3051,6 +3118,7 @@ static int __init kfunc_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set); ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, ARRAY_SIZE(generic_dtors), THIS_MODULE); diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index af5d2ffadd70..d8fc5eba529d 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -709,10 +709,10 @@ static void seq_print_delegate_opts(struct seq_file *m, msk = 1ULL << e->val; if (delegate_msk & msk) { /* emit lower-case name without prefix */ - seq_printf(m, "%c", first ? '=' : ':'); + seq_putc(m, first ? 
'=' : ':'); name += pfx_len; while (*name) { - seq_printf(m, "%c", tolower(*name)); + seq_putc(m, tolower(*name)); name++; } diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index a04f505aefe9..3969eb0382af 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -431,7 +431,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key, seq_puts(m, ": "); btf_type_seq_show(map->btf, map->btf_value_type_id, &READ_ONCE(storage->buf)->data[0], m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } else { seq_puts(m, ": {\n"); for_each_possible_cpu(cpu) { @@ -439,7 +439,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key, btf_type_seq_show(map->btf, map->btf_value_type_id, per_cpu_ptr(storage->percpu_buf, cpu), m); - seq_puts(m, "\n"); + seq_putc(m, '\n'); } seq_puts(m, "}\n"); } diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index dec892ded031..b3858a76e0b3 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -138,8 +138,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head) static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags) { if (c->percpu_size) { - void **obj = kmalloc_node(c->percpu_size, flags, node); - void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); + void __percpu **obj = kmalloc_node(c->percpu_size, flags, node); + void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags); if (!obj || !pptr) { free_percpu(pptr); @@ -253,7 +253,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic) static void free_one(void *obj, bool percpu) { if (percpu) { - free_percpu(((void **)obj)[1]); + free_percpu(((void __percpu **)obj)[1]); kfree(obj); return; } @@ -509,8 +509,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu) */ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu) { - struct bpf_mem_caches *cc, __percpu *pcc; - struct bpf_mem_cache *c, __percpu *pc; + struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc; + struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc; struct obj_cgroup *objcg = NULL; int cpu, i, unit_size, percpu_size = 0; @@ -591,7 +591,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size) { - struct bpf_mem_caches *cc, __percpu *pcc; + struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc; int cpu, i, unit_size, percpu_size; struct obj_cgroup *objcg; struct bpf_mem_cache *c; diff --git a/kernel/bpf/relo_core.c b/kernel/bpf/relo_core.c new file mode 100644 index 000000000000..aa822c9fcfde --- /dev/null +++ b/kernel/bpf/relo_core.c @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +#include "../../tools/lib/bpf/relo_core.c" diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 4b4f9670f1a9..49b8e5a0c6b4 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -308,7 +308,7 @@ put_file_unlock: spin_unlock_bh(&reuseport_lock); put_file: - fput(socket->file); + sockfd_put(socket); return err; } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index c99f8e5234ac..3615c06b7dfa 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -124,8 +124,24 @@ free_smap: return ERR_PTR(err); } +static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault) +{ + return may_fault ? 
build_id_parse(vma, build_id, NULL) + : build_id_parse_nofault(vma, build_id, NULL); +} + +/* + * Expects all id_offs[i].ip values to be set to correct initial IPs. + * They will be subsequently: + * - either adjusted in place to a file offset, if build ID fetching + * succeeds; in this case id_offs[i].build_id is set to correct build ID, + * and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID; + * - or IP will be kept intact, if build ID fetching failed; in this case + * id_offs[i].build_id is zeroed out and id_offs[i].status is set to + * BPF_STACK_BUILD_ID_IP. + */ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, - u64 *ips, u32 trace_nr, bool user) + u32 trace_nr, bool user, bool may_fault) { int i; struct mmap_unlock_irq_work *work = NULL; @@ -142,30 +158,28 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, /* cannot access current->mm, fall back to ips */ for (i = 0; i < trace_nr; i++) { id_offs[i].status = BPF_STACK_BUILD_ID_IP; - id_offs[i].ip = ips[i]; memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX); } return; } for (i = 0; i < trace_nr; i++) { - if (range_in_vma(prev_vma, ips[i], ips[i])) { + u64 ip = READ_ONCE(id_offs[i].ip); + + if (range_in_vma(prev_vma, ip, ip)) { vma = prev_vma; - memcpy(id_offs[i].build_id, prev_build_id, - BUILD_ID_SIZE_MAX); + memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX); goto build_id_valid; } - vma = find_vma(current->mm, ips[i]); - if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) { + vma = find_vma(current->mm, ip); + if (!vma || fetch_build_id(vma, id_offs[i].build_id, may_fault)) { /* per entry fall back to ips */ id_offs[i].status = BPF_STACK_BUILD_ID_IP; - id_offs[i].ip = ips[i]; memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX); continue; } build_id_valid: - id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] - - vma->vm_start; + id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start; id_offs[i].status = BPF_STACK_BUILD_ID_VALID; prev_vma = vma; prev_build_id = id_offs[i].build_id; @@ -216,7 +230,7 @@ static long __bpf_get_stackid(struct bpf_map *map, struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); struct stack_map_bucket *bucket, *new_bucket, *old_bucket; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; - u32 hash, id, trace_nr, trace_len; + u32 hash, id, trace_nr, trace_len, i; bool user = flags & BPF_F_USER_STACK; u64 *ips; bool hash_matches; @@ -238,15 +252,18 @@ static long __bpf_get_stackid(struct bpf_map *map, return id; if (stack_map_use_build_id(map)) { + struct bpf_stack_build_id *id_offs; + /* for build_id+offset, pop a bucket before slow cmp */ new_bucket = (struct stack_map_bucket *) pcpu_freelist_pop(&smap->freelist); if (unlikely(!new_bucket)) return -ENOMEM; new_bucket->nr = trace_nr; - stack_map_get_build_id_offset( - (struct bpf_stack_build_id *)new_bucket->data, - ips, trace_nr, user); + id_offs = (struct bpf_stack_build_id *)new_bucket->data; + for (i = 0; i < trace_nr; i++) + id_offs[i].ip = ips[i]; + stack_map_get_build_id_offset(id_offs, trace_nr, user, false /* !may_fault */); trace_len = trace_nr * sizeof(struct bpf_stack_build_id); if (hash_matches && bucket->nr == trace_nr && memcmp(bucket->data, new_bucket->data, trace_len) == 0) { @@ -387,7 +404,7 @@ const struct bpf_func_proto bpf_get_stackid_proto_pe = { static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, struct perf_callchain_entry *trace_in, - void *buf, u32 size, u64 flags) + void *buf, u32 size, u64 
flags, bool may_fault) { u32 trace_nr, copy_len, elem_size, num_elem, max_depth; bool user_build_id = flags & BPF_F_USER_BUILD_ID; @@ -405,8 +422,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, if (kernel && user_build_id) goto clear; - elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id) - : sizeof(u64); + elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64); if (unlikely(size % elem_size)) goto clear; @@ -427,6 +443,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, if (sysctl_perf_event_max_stack < max_depth) max_depth = sysctl_perf_event_max_stack; + if (may_fault) + rcu_read_lock(); /* need RCU for perf's callchain below */ + if (trace_in) trace = trace_in; else if (kernel && task) @@ -434,21 +453,34 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, else trace = get_perf_callchain(regs, 0, kernel, user, max_depth, crosstask, false); - if (unlikely(!trace)) - goto err_fault; - if (trace->nr < skip) + if (unlikely(!trace) || trace->nr < skip) { + if (may_fault) + rcu_read_unlock(); goto err_fault; + } trace_nr = trace->nr - skip; trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; copy_len = trace_nr * elem_size; ips = trace->ip + skip; - if (user && user_build_id) - stack_map_get_build_id_offset(buf, ips, trace_nr, user); - else + if (user_build_id) { + struct bpf_stack_build_id *id_offs = buf; + u32 i; + + for (i = 0; i < trace_nr; i++) + id_offs[i].ip = ips[i]; + } else { memcpy(buf, ips, copy_len); + } + + /* trace/ips should not be dereferenced after this point */ + if (may_fault) + rcu_read_unlock(); + + if (user_build_id) + stack_map_get_build_id_offset(buf, trace_nr, user, may_fault); if (size > copy_len) memset(buf + copy_len, 0, size - copy_len); @@ -464,7 +496,7 @@ clear: BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, u64, flags) { - return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */); } const struct bpf_func_proto bpf_get_stack_proto = { @@ -477,8 +509,24 @@ const struct bpf_func_proto bpf_get_stack_proto = { .arg4_type = ARG_ANYTHING, }; -BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, - u32, size, u64, flags) +BPF_CALL_4(bpf_get_stack_sleepable, struct pt_regs *, regs, void *, buf, u32, size, + u64, flags) +{ + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, true /* may_fault */); +} + +const struct bpf_func_proto bpf_get_stack_sleepable_proto = { + .func = bpf_get_stack_sleepable, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + +static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, + u64 flags, bool may_fault) { struct pt_regs *regs; long res = -EINVAL; @@ -488,12 +536,18 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, regs = task_pt_regs(task); if (regs) - res = __bpf_get_stack(regs, task, NULL, buf, size, flags); + res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault); put_task_stack(task); return res; } +BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, + u32, size, u64, flags) +{ + return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */); +} + const struct bpf_func_proto bpf_get_task_stack_proto = { .func = bpf_get_task_stack, 
.gpl_only = false, @@ -505,6 +559,23 @@ const struct bpf_func_proto bpf_get_task_stack_proto = { .arg4_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_get_task_stack_sleepable, struct task_struct *, task, void *, buf, + u32, size, u64, flags) +{ + return __bpf_get_task_stack(task, buf, size, flags, true /* !may_fault */); +} + +const struct bpf_func_proto bpf_get_task_stack_sleepable_proto = { + .func = bpf_get_task_stack_sleepable, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, void *, buf, u32, size, u64, flags) { @@ -516,7 +587,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, __u64 nr_kernel; if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) - return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */); if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | BPF_F_USER_BUILD_ID))) @@ -536,7 +607,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, __u64 nr = trace->nr; trace->nr = nr_kernel; - err = __bpf_get_stack(regs, NULL, trace, buf, size, flags); + err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); /* restore nr */ trace->nr = nr; @@ -548,7 +619,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, goto clear; flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; - err = __bpf_get_stack(regs, NULL, trace, buf, size, flags); + err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); } return err; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index bf6c5f685ea2..8a4117f6d761 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -550,7 +550,8 @@ void btf_record_free(struct btf_record *rec) case BPF_KPTR_PERCPU: if (rec->fields[i].kptr.module) module_put(rec->fields[i].kptr.module); - btf_put(rec->fields[i].kptr.btf); + if (btf_is_kernel(rec->fields[i].kptr.btf)) + btf_put(rec->fields[i].kptr.btf); break; case BPF_LIST_HEAD: case BPF_LIST_NODE: @@ -596,7 +597,8 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: - btf_get(fields[i].kptr.btf); + if (btf_is_kernel(fields[i].kptr.btf)) + btf_get(fields[i].kptr.btf); if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) { ret = -ENXIO; goto free; @@ -733,15 +735,11 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) } } -/* called from workqueue */ -static void bpf_map_free_deferred(struct work_struct *work) +static void bpf_map_free(struct bpf_map *map) { - struct bpf_map *map = container_of(work, struct bpf_map, work); struct btf_record *rec = map->record; struct btf *btf = map->btf; - security_bpf_map_free(map); - bpf_map_release_memcg(map); /* implementation dependent freeing */ map->ops->map_free(map); /* Delay freeing of btf_record for maps, as map_free @@ -760,6 +758,16 @@ static void bpf_map_free_deferred(struct work_struct *work) btf_put(btf); } +/* called from workqueue */ +static void bpf_map_free_deferred(struct work_struct *work) +{ + struct bpf_map *map = container_of(work, struct bpf_map, work); + + security_bpf_map_free(map); + bpf_map_release_memcg(map); + bpf_map_free(map); +} + static void bpf_map_put_uref(struct 
bpf_map *map) { if (atomic64_dec_and_test(&map->usercnt)) { @@ -1411,8 +1419,7 @@ static int map_create(union bpf_attr *attr) free_map_sec: security_bpf_map_free(map); free_map: - btf_put(map->btf); - map->ops->map_free(map); + bpf_map_free(map); put_token: bpf_token_put(token); return err; @@ -5668,7 +5675,7 @@ static int token_create(union bpf_attr *attr) return bpf_token_create(attr); } -static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) +static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) { union bpf_attr attr; int err; @@ -5932,6 +5939,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = { BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) { + *res = 0; if (flags) return -EINVAL; @@ -5952,7 +5960,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { .arg1_type = ARG_PTR_TO_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), }; static const struct bpf_func_proto * diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 39d5710c68ad..dd86282ccaa4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -385,11 +385,6 @@ static void verbose_invalid_scalar(struct bpf_verifier_env *env, verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval); } -static bool type_may_be_null(u32 type) -{ - return type & PTR_MAYBE_NULL; -} - static bool reg_not_null(const struct bpf_reg_state *reg) { enum bpf_reg_type type; @@ -2184,6 +2179,44 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg) reg->smin_value = max_t(s64, reg->smin_value, new_smin); reg->smax_value = min_t(s64, reg->smax_value, new_smax); } + + /* Here we would like to handle a special case after sign extending load, + * when upper bits for a 64-bit range are all 1s or all 0s. + * + * Upper bits are all 1s when register is in a range: + * [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff] + * Upper bits are all 0s when register is in a range: + * [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff] + * Together this forms are continuous range: + * [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff] + * + * Now, suppose that register range is in fact tighter: + * [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R) + * Also suppose that it's 32-bit range is positive, + * meaning that lower 32-bits of the full 64-bit register + * are in the range: + * [0x0000_0000, 0x7fff_ffff] (W) + * + * If this happens, then any value in a range: + * [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff] + * is smaller than a lowest bound of the range (R): + * 0xffff_ffff_8000_0000 + * which means that upper bits of the full 64-bit register + * can't be all 1s, when lower bits are in range (W). + * + * Note that: + * - 0xffff_ffff_8000_0000 == (s64)S32_MIN + * - 0x0000_0000_7fff_ffff == (s64)S32_MAX + * These relations are used in the conditions below. 
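One concrete instance of this deduction, using only values already named above (an illustration, not additional patch text):

	smin_value = 0xffff_ffff_8000_0000	/* (s64)S32_MIN */
	smax_value = 0x0000_0000_7fff_ffff	/* (s64)S32_MAX */
	s32_min_value = 0, s32_max_value = 0x7fff_ffff

Any value whose upper 32 bits are all 1s and whose lower 32 bits fall in [0, 0x7fff_ffff] would lie in [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff], strictly below smin_value; hence the upper bits must be all 0s, and the branch below tightens both the signed and unsigned 64-bit bounds to [0, 0x7fff_ffff].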
+ */ + if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) { + reg->smin_value = reg->s32_min_value; + reg->smax_value = reg->s32_max_value; + reg->umin_value = reg->s32_min_value; + reg->umax_value = reg->s32_max_value; + reg->var_off = tnum_intersect(reg->var_off, + tnum_range(reg->smin_value, reg->smax_value)); + } } static void __reg_deduce_bounds(struct bpf_reg_state *reg) @@ -2336,6 +2369,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, __mark_reg_unknown(env, regs + regno); } +static int __mark_reg_s32_range(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + u32 regno, + s32 s32_min, + s32 s32_max) +{ + struct bpf_reg_state *reg = regs + regno; + + reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); + reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); + + reg->smin_value = max_t(s64, reg->smin_value, s32_min); + reg->smax_value = min_t(s64, reg->smax_value, s32_max); + + reg_bounds_sync(reg); + + return reg_bounds_sanity_check(env, reg, "s32_range"); +} + static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { @@ -3337,9 +3389,87 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) return env->insn_aux_data[insn_idx].jmp_point; } +#define LR_FRAMENO_BITS 3 +#define LR_SPI_BITS 6 +#define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1) +#define LR_SIZE_BITS 4 +#define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1) +#define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1) +#define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1) +#define LR_SPI_OFF LR_FRAMENO_BITS +#define LR_IS_REG_OFF (LR_SPI_BITS + LR_FRAMENO_BITS) +#define LINKED_REGS_MAX 6 + +struct linked_reg { + u8 frameno; + union { + u8 spi; + u8 regno; + }; + bool is_reg; +}; + +struct linked_regs { + int cnt; + struct linked_reg entries[LINKED_REGS_MAX]; +}; + +static struct linked_reg *linked_regs_push(struct linked_regs *s) +{ + if (s->cnt < LINKED_REGS_MAX) + return &s->entries[s->cnt++]; + + return NULL; +} + +/* Use u64 as a vector of 6 10-bit values, use first 4-bits to track + * number of elements currently in stack. + * Pack one history entry for linked registers as 10 bits in the following format: + * - 3-bits frameno + * - 6-bits spi_or_reg + * - 1-bit is_reg + */ +static u64 linked_regs_pack(struct linked_regs *s) +{ + u64 val = 0; + int i; + + for (i = 0; i < s->cnt; ++i) { + struct linked_reg *e = &s->entries[i]; + u64 tmp = 0; + + tmp |= e->frameno; + tmp |= e->spi << LR_SPI_OFF; + tmp |= (e->is_reg ? 
1 : 0) << LR_IS_REG_OFF; + + val <<= LR_ENTRY_BITS; + val |= tmp; + } + val <<= LR_SIZE_BITS; + val |= s->cnt; + return val; +} + +static void linked_regs_unpack(u64 val, struct linked_regs *s) +{ + int i; + + s->cnt = val & LR_SIZE_MASK; + val >>= LR_SIZE_BITS; + + for (i = 0; i < s->cnt; ++i) { + struct linked_reg *e = &s->entries[i]; + + e->frameno = val & LR_FRAMENO_MASK; + e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK; + e->is_reg = (val >> LR_IS_REG_OFF) & 0x1; + val >>= LR_ENTRY_BITS; + } +} + /* for any branch, call, exit record the history of jmps in the given state */ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, - int insn_flags) + int insn_flags, u64 linked_regs) { u32 cnt = cur->jmp_history_cnt; struct bpf_jmp_history_entry *p; @@ -3355,6 +3485,10 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n", env->insn_idx, env->cur_hist_ent->flags, insn_flags); env->cur_hist_ent->flags |= insn_flags; + WARN_ONCE(env->cur_hist_ent->linked_regs != 0, + "verifier insn history bug: insn_idx %d linked_regs != 0: %#llx\n", + env->insn_idx, env->cur_hist_ent->linked_regs); + env->cur_hist_ent->linked_regs = linked_regs; return 0; } @@ -3369,6 +3503,7 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st p->idx = env->insn_idx; p->prev_idx = env->prev_insn_idx; p->flags = insn_flags; + p->linked_regs = linked_regs; cur->jmp_history_cnt = cnt; env->cur_hist_ent = p; @@ -3534,6 +3669,11 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) return bt->reg_masks[bt->frame] & (1 << reg); } +static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg) +{ + return bt->reg_masks[frame] & (1 << reg); +} + static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot) { return bt->stack_masks[frame] & (1ull << slot); @@ -3578,6 +3718,42 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) } } +/* If any register R in hist->linked_regs is marked as precise in bt, + * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs. + */ +static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist) +{ + struct linked_regs linked_regs; + bool some_precise = false; + int i; + + if (!hist || hist->linked_regs == 0) + return; + + linked_regs_unpack(hist->linked_regs, &linked_regs); + for (i = 0; i < linked_regs.cnt; ++i) { + struct linked_reg *e = &linked_regs.entries[i]; + + if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) || + (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) { + some_precise = true; + break; + } + } + + if (!some_precise) + return; + + for (i = 0; i < linked_regs.cnt; ++i) { + struct linked_reg *e = &linked_regs.entries[i]; + + if (e->is_reg) + bt_set_frame_reg(bt, e->frameno, e->regno); + else + bt_set_frame_slot(bt, e->frameno, e->spi); + } +} + static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); /* For given verifier state backtrack_insn() is called from the last insn to @@ -3617,6 +3793,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); } + /* If there is a history record that some registers gained range at this insn, + * propagate precision marks to those registers, so that bt_is_reg_set() + * accounts for these registers. 
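A worked example of the 10-bit record encoding that linked_regs_pack()/linked_regs_unpack() and bt_sync_linked_regs() above operate on (values chosen purely for illustration):

	r6 in frame 0:     frameno=0, regno=6, is_reg=1  ->  (1 << 9) | (6 << 3) | 0 = 0x230
	stack slot spi=3:  frameno=0, spi=3,   is_reg=0  ->  (0 << 9) | (3 << 3) | 0 = 0x018

	linked_regs_pack() folds them into ((0x230 << 10) | 0x018) << 4 | 2 = 0x8c0182

The low 4 bits carry the element count (2 here); linked_regs_unpack() recovers both entries from this u64, which push_jmp_history() stores in bpf_jmp_history_entry::linked_regs.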
+ */ + bt_sync_linked_regs(bt, hist); + if (class == BPF_ALU || class == BPF_ALU64) { if (!bt_is_reg_set(bt, dreg)) return 0; @@ -3846,7 +4028,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, */ bt_set_reg(bt, dreg); bt_set_reg(bt, sreg); - /* else dreg <cond> K + } else if (BPF_SRC(insn->code) == BPF_K) { + /* dreg <cond> K * Only dreg still needs precision before * this insn, so for the K-based conditional * there is nothing new to be marked. @@ -3864,6 +4047,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, /* to be analyzed */ return -ENOTSUPP; } + /* Propagate precision marks to linked registers, to account for + * registers marked as precise in this function. + */ + bt_sync_linked_regs(bt, hist); return 0; } @@ -3991,96 +4178,6 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_ } } -static bool idset_contains(struct bpf_idset *s, u32 id) -{ - u32 i; - - for (i = 0; i < s->count; ++i) - if (s->ids[i] == (id & ~BPF_ADD_CONST)) - return true; - - return false; -} - -static int idset_push(struct bpf_idset *s, u32 id) -{ - if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) - return -EFAULT; - s->ids[s->count++] = id & ~BPF_ADD_CONST; - return 0; -} - -static void idset_reset(struct bpf_idset *s) -{ - s->count = 0; -} - -/* Collect a set of IDs for all registers currently marked as precise in env->bt. - * Mark all registers with these IDs as precise. - */ -static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st) -{ - struct bpf_idset *precise_ids = &env->idset_scratch; - struct backtrack_state *bt = &env->bt; - struct bpf_func_state *func; - struct bpf_reg_state *reg; - DECLARE_BITMAP(mask, 64); - int i, fr; - - idset_reset(precise_ids); - - for (fr = bt->frame; fr >= 0; fr--) { - func = st->frame[fr]; - - bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); - for_each_set_bit(i, mask, 32) { - reg = &func->regs[i]; - if (!reg->id || reg->type != SCALAR_VALUE) - continue; - if (idset_push(precise_ids, reg->id)) - return -EFAULT; - } - - bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); - for_each_set_bit(i, mask, 64) { - if (i >= func->allocated_stack / BPF_REG_SIZE) - break; - if (!is_spilled_scalar_reg(&func->stack[i])) - continue; - reg = &func->stack[i].spilled_ptr; - if (!reg->id) - continue; - if (idset_push(precise_ids, reg->id)) - return -EFAULT; - } - } - - for (fr = 0; fr <= st->curframe; ++fr) { - func = st->frame[fr]; - - for (i = BPF_REG_0; i < BPF_REG_10; ++i) { - reg = &func->regs[i]; - if (!reg->id) - continue; - if (!idset_contains(precise_ids, reg->id)) - continue; - bt_set_frame_reg(bt, fr, i); - } - for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { - if (!is_spilled_scalar_reg(&func->stack[i])) - continue; - reg = &func->stack[i].spilled_ptr; - if (!reg->id) - continue; - if (!idset_contains(precise_ids, reg->id)) - continue; - bt_set_frame_slot(bt, fr, i); - } - } - - return 0; -} - /* * __mark_chain_precision() backtracks BPF program instruction sequence and * chain of verifier states making sure that register *regno* (if regno >= 0) @@ -4213,31 +4310,6 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) bt->frame, last_idx, first_idx, subseq_idx); } - /* If some register with scalar ID is marked as precise, - * make sure that all registers sharing this ID are also precise. - * This is needed to estimate effect of find_equal_scalars(). 
- * Do this at the last instruction of each state, - * bpf_reg_state::id fields are valid for these instructions. - * - * Allows to track precision in situation like below: - * - * r2 = unknown value - * ... - * --- state #0 --- - * ... - * r1 = r2 // r1 and r2 now share the same ID - * ... - * --- state #1 {r1.id = A, r2.id = A} --- - * ... - * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1 - * ... - * --- state #2 {r1.id = A, r2.id = A} --- - * r3 = r10 - * r3 += r1 // need to mark both r1 and r2 - */ - if (mark_precise_scalar_ids(env, st)) - return -EFAULT; - if (last_idx < 0) { /* we are at the entry into subprog, which * is expected for global funcs, but only if @@ -4458,7 +4530,7 @@ static void assign_scalar_id_before_mov(struct bpf_verifier_env *env, if (!src_reg->id && !tnum_is_const(src_reg->var_off)) /* Ensure that src_reg has a valid ID that will be copied to - * dst_reg and then will be used by find_equal_scalars() to + * dst_reg and then will be used by sync_linked_regs() to * propagate min/max range. */ src_reg->id = ++env->id_gen; @@ -4504,6 +4576,31 @@ static int get_reg_width(struct bpf_reg_state *reg) return fls64(reg->umax_value); } +/* See comment for mark_fastcall_pattern_for_call() */ +static void check_fastcall_stack_contract(struct bpf_verifier_env *env, + struct bpf_func_state *state, int insn_idx, int off) +{ + struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno]; + struct bpf_insn_aux_data *aux = env->insn_aux_data; + int i; + + if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern) + return; + /* access to the region [max_stack_depth .. fastcall_stack_off) + * from something that is not a part of the fastcall pattern, + * disable fastcall rewrites for current subprogram by setting + * fastcall_stack_off to a value smaller than any possible offset. + */ + subprog->fastcall_stack_off = S16_MIN; + /* reset fastcall aux flags within subprogram, + * happens at most once per subprogram + */ + for (i = subprog->start; i < (subprog + 1)->start; ++i) { + aux[i].fastcall_spills_num = 0; + aux[i].fastcall_pattern = 0; + } +} + /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ @@ -4552,6 +4649,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (err) return err; + check_fastcall_stack_contract(env, state, insn_idx, off); mark_stack_slot_scratched(env, spi); if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { bool reg_value_fits; @@ -4627,7 +4725,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags); + return push_jmp_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -4686,6 +4784,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env, return err; } + check_fastcall_stack_contract(env, state, insn_idx, min_off); /* Variable offset writes destroy any spilled pointers in range. 
*/ for (i = min_off; i < max_off; i++) { u8 new_type, *stype; @@ -4824,6 +4923,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, reg = ®_state->stack[spi].spilled_ptr; mark_stack_slot_scratched(env, spi); + check_fastcall_stack_contract(env, state, env->insn_idx, off); if (is_spilled_reg(®_state->stack[spi])) { u8 spill_size = 1; @@ -4932,7 +5032,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, insn_flags = 0; /* we are not restoring spilled register */ } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags); + return push_jmp_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -4984,6 +5084,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env, min_off = reg->smin_value + off; max_off = reg->smax_value + off; mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); + check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off); return 0; } @@ -5589,11 +5690,13 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type, - struct btf **btf, u32 *btf_id) + struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, .log = &env->log, + .is_retval = false, + .is_ldsx = is_ldsx, }; if (env->ops->is_valid_access && @@ -5606,6 +5709,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, * type of narrower access. */ *reg_type = info.reg_type; + *is_retval = info.is_retval; if (base_type(*reg_type) == PTR_TO_BTF_ID) { *btf = info.btf; @@ -6694,10 +6798,20 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env, struct bpf_func_state *state, enum bpf_access_type t) { - int min_valid_off; + struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx]; + int min_valid_off, max_bpf_stack; + + /* If accessing instruction is a spill/fill from bpf_fastcall pattern, + * add room for all caller saved registers below MAX_BPF_STACK. + * In case if bpf_fastcall rewrite won't happen maximal stack depth + * would be checked by check_max_stack_depth_subprog(). 
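+ *
+ * Editor's note, a worked example assuming the current definitions
+ * MAX_BPF_STACK == 512, CALLER_SAVED_REGS == 6 and BPF_REG_SIZE == 8:
+ * a spill/fill that is part of a fastcall pattern may then reach offsets
+ * down to -560, i.e. 48 bytes below the usual -512 limit.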
+ */ + max_bpf_stack = MAX_BPF_STACK; + if (aux->fastcall_pattern) + max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE; if (t == BPF_WRITE || env->allow_uninit_stack) - min_valid_off = -MAX_BPF_STACK; + min_valid_off = -max_bpf_stack; else min_valid_off = -state->allocated_stack; @@ -6774,6 +6888,17 @@ static int check_stack_access_within_bounds( return grow_stack_state(env, state, -min_off /* size */); } +static bool get_func_retval_range(struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + if (prog->type == BPF_PROG_TYPE_LSM && + prog->expected_attach_type == BPF_LSM_MAC && + !bpf_lsm_get_retval_range(prog, range)) { + return true; + } + return false; +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -6878,6 +7003,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { + bool is_retval = false; + struct bpf_retval_range range; enum bpf_reg_type reg_type = SCALAR_VALUE; struct btf *btf = NULL; u32 btf_id = 0; @@ -6893,7 +7020,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return err; err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf, - &btf_id); + &btf_id, &is_retval, is_ldsx); if (err) verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { @@ -6902,7 +7029,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); + if (is_retval && get_func_retval_range(env->prog, &range)) { + err = __mark_reg_s32_range(env, regs, value_regno, + range.minval, range.maxval); + if (err) + return err; + } else { + mark_reg_unknown(env, regs, value_regno); + } } else { mark_reg_known_zero(env, regs, value_regno); @@ -7666,29 +7800,38 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; - struct bpf_map *map_ptr = reg->map_ptr; struct btf_field *kptr_field; + struct bpf_map *map_ptr; + struct btf_record *rec; u32 kptr_off; + if (type_is_ptr_alloc_obj(reg->type)) { + rec = reg_btf_record(reg); + } else { /* PTR_TO_MAP_VALUE */ + map_ptr = reg->map_ptr; + if (!map_ptr->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", + map_ptr->name); + return -EINVAL; + } + rec = map_ptr->record; + meta->map_ptr = map_ptr; + } + if (!tnum_is_const(reg->var_off)) { verbose(env, "R%d doesn't have constant offset. 
kptr has to be at the constant offset\n", regno); return -EINVAL; } - if (!map_ptr->btf) { - verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", - map_ptr->name); - return -EINVAL; - } - if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) { - verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); + + if (!btf_record_has_field(rec, BPF_KPTR)) { + verbose(env, "R%d has no valid kptr\n", regno); return -EINVAL; } - meta->map_ptr = map_ptr; kptr_off = reg->off + reg->var_off.value; - kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR); + kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR); if (!kptr_field) { verbose(env, "off=%d doesn't point to kptr\n", kptr_off); return -EACCES; @@ -7833,12 +7976,17 @@ static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta) return meta->kfunc_flags & KF_ITER_DESTROY; } -static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg) +static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx, + const struct btf_param *arg) { /* btf_check_iter_kfuncs() guarantees that first argument of any iter * kfunc is iter state pointer */ - return arg == 0 && is_iter_kfunc(meta); + if (is_iter_kfunc(meta)) + return arg_idx == 0; + + /* iter passed as an argument to a generic kfunc */ + return btf_param_match_suffix(meta->btf, arg, "__iter"); } static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx, @@ -7846,14 +7994,20 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; const struct btf_type *t; - const struct btf_param *arg; - int spi, err, i, nr_slots; - u32 btf_id; + int spi, err, i, nr_slots, btf_id; - /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */ - arg = &btf_params(meta->func_proto)[0]; - t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */ - t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */ + /* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs() + * ensures struct convention, so we wouldn't need to do any BTF + * validation here. But given iter state can be passed as a parameter + * to any kfunc, if arg has "__iter" suffix, we need to be a bit more + * conservative here. + */ + btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); + if (btf_id < 0) { + verbose(env, "expected valid iter pointer as arg #%d\n", regno); + return -EINVAL; + } + t = btf_type_by_id(meta->btf, btf_id); nr_slots = t->size / BPF_REG_SIZE; if (is_iter_new_kfunc(meta)) { @@ -7875,7 +8029,9 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id if (err) return err; } else { - /* iter_next() or iter_destroy() expect initialized iter state*/ + /* iter_next() or iter_destroy(), as well as any kfunc + * accepting iter argument, expect initialized iter state + */ err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); switch (err) { case 0: @@ -7989,6 +8145,15 @@ static int widen_imprecise_scalars(struct bpf_verifier_env *env, return 0; } +static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st, + struct bpf_kfunc_call_arg_meta *meta) +{ + int iter_frameno = meta->iter.frameno; + int iter_spi = meta->iter.spi; + + return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; +} + /* process_iter_next_call() is called when verifier gets to iterator's next * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. 
We'll refer * to it as just "iter_next()" in comments below. @@ -8073,12 +8238,10 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; struct bpf_reg_state *cur_iter, *queued_iter; - int iter_frameno = meta->iter.frameno; - int iter_spi = meta->iter.spi; BTF_TYPE_EMIT(struct bpf_iter); - cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; + cur_iter = get_iter_from_state(cur_st, meta); if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { @@ -8106,7 +8269,7 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, if (!queued_st) return -ENOMEM; - queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; + queued_iter = get_iter_from_state(queued_st, meta); queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; queued_iter->iter.depth++; if (prev_st) @@ -8130,6 +8293,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) type == ARG_CONST_SIZE_OR_ZERO; } +static bool arg_type_is_raw_mem(enum bpf_arg_type type) +{ + return base_type(type) == ARG_PTR_TO_MEM && + type & MEM_UNINIT; +} + static bool arg_type_is_release(enum bpf_arg_type type) { return type & OBJ_RELEASE; @@ -8140,16 +8309,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type) return base_type(type) == ARG_PTR_TO_DYNPTR; } -static int int_ptr_type_to_size(enum bpf_arg_type type) -{ - if (type == ARG_PTR_TO_INT) - return sizeof(u32); - else if (type == ARG_PTR_TO_LONG) - return sizeof(u64); - - return -EINVAL; -} - static int resolve_map_arg_type(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_arg_type *arg_type) @@ -8222,16 +8381,6 @@ static const struct bpf_reg_types mem_types = { }, }; -static const struct bpf_reg_types int_ptr_types = { - .types = { - PTR_TO_STACK, - PTR_TO_PACKET, - PTR_TO_PACKET_META, - PTR_TO_MAP_KEY, - PTR_TO_MAP_VALUE, - }, -}; - static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE, @@ -8262,7 +8411,12 @@ static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; -static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; +static const struct bpf_reg_types kptr_xchg_dest_types = { + .types = { + PTR_TO_MAP_VALUE, + PTR_TO_BTF_ID | MEM_ALLOC + } +}; static const struct bpf_reg_types dynptr_types = { .types = { PTR_TO_STACK, @@ -8287,14 +8441,12 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, [ARG_PTR_TO_MEM] = &mem_types, [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, - [ARG_PTR_TO_INT] = &int_ptr_types, - [ARG_PTR_TO_LONG] = &int_ptr_types, [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK] = &stack_ptr_types, [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, [ARG_PTR_TO_TIMER] = &timer_types, - [ARG_PTR_TO_KPTR] = &kptr_types, + [ARG_KPTR_XCHG_DEST] = &kptr_xchg_dest_types, [ARG_PTR_TO_DYNPTR] = &dynptr_types, }; @@ -8333,7 +8485,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno, if (base_type(arg_type) 
== ARG_PTR_TO_MEM) type &= ~DYNPTR_TYPE_FLAG_MASK; - if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) { + /* Local kptr types are allowed as the source argument of bpf_kptr_xchg */ + if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) { type &= ~MEM_ALLOC; type &= ~MEM_PERCPU; } @@ -8426,7 +8579,8 @@ found: verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); return -EFAULT; } - if (meta->func_id == BPF_FUNC_kptr_xchg) { + /* Check if local kptr in src arg matches kptr in dst arg */ + if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) { if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) return -EACCES; } @@ -8737,7 +8891,7 @@ skip_type_check: meta->release_regno = regno; } - if (reg->ref_obj_id) { + if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) { if (meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", regno, reg->ref_obj_id, @@ -8849,9 +9003,11 @@ skip_type_check: */ meta->raw_mode = arg_type & MEM_UNINIT; if (arg_type & MEM_FIXED_SIZE) { - err = check_helper_mem_access(env, regno, - fn->arg_size[arg], false, - meta); + err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta); + if (err) + return err; + if (arg_type & MEM_ALIGNED) + err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); } break; case ARG_CONST_SIZE: @@ -8876,17 +9032,6 @@ skip_type_check: if (err) return err; break; - case ARG_PTR_TO_INT: - case ARG_PTR_TO_LONG: - { - int size = int_ptr_type_to_size(arg_type); - - err = check_helper_mem_access(env, regno, size, false, meta); - if (err) - return err; - err = check_ptr_alignment(env, reg, 0, size, true); - break; - } case ARG_PTR_TO_CONST_STR: { err = check_reg_const_str(env, reg, regno); @@ -8894,7 +9039,7 @@ skip_type_check: return err; break; } - case ARG_PTR_TO_KPTR: + case ARG_KPTR_XCHG_DEST: err = process_kptr_func(env, regno, meta); if (err) return err; @@ -9203,15 +9348,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; - if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg1_type)) count++; - if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg2_type)) count++; - if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg3_type)) count++; - if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg4_type)) count++; - if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg5_type)) count++; /* We only support one arg being in raw mode at the moment, @@ -9925,9 +10070,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) return is_rbtree_lock_required_kfunc(kfunc_btf_id); } -static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg) +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg, + bool return_32bit) { - return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; + if (return_32bit) + return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; + else + return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; } static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) @@ -9964,8 +10113,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) if (err) return err; - /* enforce R0 return value range */ - if 
(!retval_range_within(callee->callback_ret_range, r0)) { + /* enforce R0 return value range, and bpf_callback_t returns 64bit */ + if (!retval_range_within(callee->callback_ret_range, r0, false)) { verbose_invalid_scalar(env, r0, callee->callback_ret_range, "At callback return", "R0"); return -EINVAL; @@ -10267,6 +10416,19 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno state->callback_subprogno == subprogno); } +static int get_helper_proto(struct bpf_verifier_env *env, int func_id, + const struct bpf_func_proto **ptr) +{ + if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) + return -ERANGE; + + if (!env->ops->get_func_proto) + return -EINVAL; + + *ptr = env->ops->get_func_proto(func_id, env->prog); + return *ptr ? 0 : -EINVAL; +} + static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx_p) { @@ -10283,18 +10445,16 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn /* find function prototype */ func_id = insn->imm; - if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { - verbose(env, "invalid func %s#%d\n", func_id_name(func_id), - func_id); + err = get_helper_proto(env, insn->imm, &fn); + if (err == -ERANGE) { + verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } - if (env->ops->get_func_proto) - fn = env->ops->get_func_proto(func_id, env->prog); - if (!fn) { + if (err) { verbose(env, "program of this type cannot use helper %s#%d\n", func_id_name(func_id), func_id); - return -EINVAL; + return err; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ @@ -11230,7 +11390,7 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) return KF_ARG_PTR_TO_DYNPTR; - if (is_kfunc_arg_iter(meta, argno)) + if (is_kfunc_arg_iter(meta, argno, &args[argno])) return KF_ARG_PTR_TO_ITER; if (is_kfunc_arg_list_head(meta->btf, &args[argno])) @@ -11332,8 +11492,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, * btf_struct_ids_match() to walk the struct at the 0th offset, and * resolve types. 
*/ - if (is_kfunc_acquire(meta) || - (is_kfunc_release(meta) && reg->ref_obj_id) || + if ((is_kfunc_release(meta) && reg->ref_obj_id) || btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) strict_type_match = true; @@ -11950,7 +12109,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ switch (kf_arg_type) { case KF_ARG_PTR_TO_CTX: if (reg->type != PTR_TO_CTX) { - verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t)); + verbose(env, "arg#%d expected pointer to ctx, but got %s\n", + i, reg_type_str(env, reg->type)); return -EINVAL; } @@ -12673,6 +12833,17 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, regs[BPF_REG_0].btf = desc_btf; regs[BPF_REG_0].type = PTR_TO_BTF_ID; regs[BPF_REG_0].btf_id = ptr_type_id; + + if (is_iter_next_kfunc(&meta)) { + struct bpf_reg_state *cur_iter; + + cur_iter = get_iter_from_state(env->cur_state, &meta); + + if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */ + regs[BPF_REG_0].type |= MEM_RCU; + else + regs[BPF_REG_0].type |= PTR_TRUSTED; + } } if (is_kfunc_ret_null(&meta)) { @@ -14101,7 +14272,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, u64 val = reg_const_value(src_reg, alu32); if ((dst_reg->id & BPF_ADD_CONST) || - /* prevent overflow in find_equal_scalars() later */ + /* prevent overflow in sync_linked_regs() later */ val > (u32)S32_MAX) { /* * If the register already went through rX += val @@ -14116,7 +14287,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, } else { /* * Make sure ID is cleared otherwise dst_reg min/max could be - * incorrectly propagated into other registers by find_equal_scalars() + * incorrectly propagated into other registers by sync_linked_regs() */ dst_reg->id = 0; } @@ -14266,7 +14437,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) copy_register_state(dst_reg, src_reg); /* Make sure ID is cleared if src_reg is not in u32 * range otherwise dst_reg min/max could be incorrectly - * propagated into src_reg by find_equal_scalars() + * propagated into src_reg by sync_linked_regs() */ if (!is_src_reg_u32) dst_reg->id = 0; @@ -15089,14 +15260,66 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, return true; } -static void find_equal_scalars(struct bpf_verifier_state *vstate, - struct bpf_reg_state *known_reg) +static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg, + u32 id, u32 frameno, u32 spi_or_reg, bool is_reg) +{ + struct linked_reg *e; + + if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id) + return; + + e = linked_regs_push(reg_set); + if (e) { + e->frameno = frameno; + e->is_reg = is_reg; + e->regno = spi_or_reg; + } else { + reg->id = 0; + } +} + +/* For all R being scalar registers or spilled scalar registers + * in verifier state, save R in linked_regs if R->id == id. + * If there are too many Rs sharing same id, reset id for leftover Rs. 
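+ *
+ * Editor's note: "too many" means more than LINKED_REGS_MAX (6) entries;
+ * linked_regs_push() then returns NULL and __collect_linked_regs() clears
+ * reg->id, so the extra registers simply drop out of range propagation
+ * rather than being tracked incorrectly.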
+ */ +static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id, + struct linked_regs *linked_regs) +{ + struct bpf_func_state *func; + struct bpf_reg_state *reg; + int i, j; + + id = id & ~BPF_ADD_CONST; + for (i = vstate->curframe; i >= 0; i--) { + func = vstate->frame[i]; + for (j = 0; j < BPF_REG_FP; j++) { + reg = &func->regs[j]; + __collect_linked_regs(linked_regs, reg, id, i, j, true); + } + for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { + if (!is_spilled_reg(&func->stack[j])) + continue; + reg = &func->stack[j].spilled_ptr; + __collect_linked_regs(linked_regs, reg, id, i, j, false); + } + } +} + +/* For all R in linked_regs, copy known_reg range into R + * if R->id == known_reg->id. + */ +static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg, + struct linked_regs *linked_regs) { struct bpf_reg_state fake_reg; - struct bpf_func_state *state; struct bpf_reg_state *reg; + struct linked_reg *e; + int i; - bpf_for_each_reg_in_vstate(vstate, state, reg, ({ + for (i = 0; i < linked_regs->cnt; ++i) { + e = &linked_regs->entries[i]; + reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno] + : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr; if (reg->type != SCALAR_VALUE || reg == known_reg) continue; if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) @@ -15114,7 +15337,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate, copy_register_state(reg, known_reg); /* * Must preserve off, id and add_const flag, - * otherwise another find_equal_scalars() will be incorrect. + * otherwise another sync_linked_regs() will be incorrect. */ reg->off = saved_off; @@ -15122,7 +15345,7 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate, scalar_min_max_add(reg, &fake_reg); reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); } - })); + } } static int check_cond_jmp_op(struct bpf_verifier_env *env, @@ -15133,6 +15356,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; struct bpf_reg_state *eq_branch_regs; + struct linked_regs linked_regs = {}; u8 opcode = BPF_OP(insn->code); bool is_jmp32; int pred = -1; @@ -15247,6 +15471,21 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, return 0; } + /* Push scalar registers sharing same ID to jump history, + * do this before creating 'other_branch', so that both + * 'this_branch' and 'other_branch' share this history + * if parent state is created. 
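+ *
+ * Editor's sketch, not part of the original patch:
+ *
+ *   r6 = r7              // r6 and r7 share id A
+ *   if r6 > 10 goto l    // collect_linked_regs(A) -> {r6, r7}, packed into
+ *                        // this insn's history entry before push_stack()
+ *                        // creates 'other_branch'; both branches then use
+ *                        // sync_linked_regs() to copy r6's refined range to r7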
+ */ + if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id) + collect_linked_regs(this_branch, src_reg->id, &linked_regs); + if (dst_reg->type == SCALAR_VALUE && dst_reg->id) + collect_linked_regs(this_branch, dst_reg->id, &linked_regs); + if (linked_regs.cnt > 1) { + err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); + if (err) + return err; + } + other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false); if (!other_branch) @@ -15277,13 +15516,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id && !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { - find_equal_scalars(this_branch, src_reg); - find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); + sync_linked_regs(this_branch, src_reg, &linked_regs); + sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs); } if (dst_reg->type == SCALAR_VALUE && dst_reg->id && !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { - find_equal_scalars(this_branch, dst_reg); - find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); + sync_linked_regs(this_branch, dst_reg, &linked_regs); + sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs); } /* if one pointer register is compared to another pointer @@ -15571,6 +15810,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char int err; struct bpf_func_state *frame = env->cur_state->frame[0]; const bool is_subprog = frame->subprogno; + bool return_32bit = false; /* LSM and struct_ops func-ptr's return type could be "void" */ if (!is_subprog || frame->in_exception_callback_fn) { @@ -15676,12 +15916,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char case BPF_PROG_TYPE_LSM: if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { - /* Regular BPF_PROG_TYPE_LSM programs can return - * any value. - */ - return 0; - } - if (!env->prog->aux->attach_func_proto->type) { + /* no range found, any return value is allowed */ + if (!get_func_retval_range(env->prog, &range)) + return 0; + /* no restricted range, any return value is allowed */ + if (range.minval == S32_MIN && range.maxval == S32_MAX) + return 0; + return_32bit = true; + } else if (!env->prog->aux->attach_func_proto->type) { /* Make sure programs that attach to void * hooks don't try to modify return value. */ @@ -15711,7 +15953,7 @@ enforce_retval: if (err) return err; - if (!retval_range_within(range, reg)) { + if (!retval_range_within(range, reg, return_32bit)) { verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name); if (!is_subprog && prog->expected_attach_type == BPF_LSM_CGROUP && @@ -15877,6 +16119,274 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, return ret; } +/* Bitmask with 1s for all caller saved registers */ +#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1) + +/* Return a bitmask specifying which caller saved registers are + * clobbered by a call to a helper *as if* this helper follows + * bpf_fastcall contract: + * - includes R0 if function is non-void; + * - includes R1-R5 if corresponding parameter has is described + * in the function prototype. 
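+ *
+ * Editor's illustration: a non-void helper that declares two arguments
+ * yields the mask BIT(BPF_REG_0) | BIT(BPF_REG_1) | BIT(BPF_REG_2), so
+ * only r3-r5 are expected in the surrounding spill/fill pattern.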
+ */ +static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn) +{ + u32 mask; + int i; + + mask = 0; + if (fn->ret_type != RET_VOID) + mask |= BIT(BPF_REG_0); + for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) + if (fn->arg_type[i] != ARG_DONTCARE) + mask |= BIT(BPF_REG_1 + i); + return mask; +} + +/* True if do_misc_fixups() replaces calls to helper number 'imm', + * replacement patch is presumed to follow bpf_fastcall contract + * (see mark_fastcall_pattern_for_call() below). + */ +static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm) +{ + switch (imm) { +#ifdef CONFIG_X86_64 + case BPF_FUNC_get_smp_processor_id: + return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); +#endif + default: + return false; + } +} + +/* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */ +static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta) +{ + u32 vlen, i, mask; + + vlen = btf_type_vlen(meta->func_proto); + mask = 0; + if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type))) + mask |= BIT(BPF_REG_0); + for (i = 0; i < vlen; ++i) + mask |= BIT(BPF_REG_1 + i); + return mask; +} + +/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */ +static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta) +{ + if (meta->btf == btf_vmlinux) + return meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || + meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]; + return false; +} + +/* LLVM define a bpf_fastcall function attribute. + * This attribute means that function scratches only some of + * the caller saved registers defined by ABI. + * For BPF the set of such registers could be defined as follows: + * - R0 is scratched only if function is non-void; + * - R1-R5 are scratched only if corresponding parameter type is defined + * in the function prototype. + * + * The contract between kernel and clang allows to simultaneously use + * such functions and maintain backwards compatibility with old + * kernels that don't understand bpf_fastcall calls: + * + * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5 + * registers are not scratched by the call; + * + * - as a post-processing step, clang visits each bpf_fastcall call and adds + * spill/fill for every live r0-r5; + * + * - stack offsets used for the spill/fill are allocated as lowest + * stack offsets in whole function and are not used for any other + * purposes; + * + * - when kernel loads a program, it looks for such patterns + * (bpf_fastcall function surrounded by spills/fills) and checks if + * spill/fill stack offsets are used exclusively in fastcall patterns; + * + * - if so, and if verifier or current JIT inlines the call to the + * bpf_fastcall function (e.g. a helper call), kernel removes unnecessary + * spill/fill pairs; + * + * - when old kernel loads a program, presence of spill/fill pairs + * keeps BPF program valid, albeit slightly less efficient. 
+ * + * For example: + * + * r1 = 1; + * r2 = 2; + * *(u64 *)(r10 - 8) = r1; r1 = 1; + * *(u64 *)(r10 - 16) = r2; r2 = 2; + * call %[to_be_inlined] --> call %[to_be_inlined] + * r2 = *(u64 *)(r10 - 16); r0 = r1; + * r1 = *(u64 *)(r10 - 8); r0 += r2; + * r0 = r1; exit; + * r0 += r2; + * exit; + * + * The purpose of mark_fastcall_pattern_for_call is to: + * - look for such patterns; + * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern; + * - mark set env->insn_aux_data[*].fastcall_spills_num for call instruction; + * - update env->subprog_info[*]->fastcall_stack_off to find an offset + * at which bpf_fastcall spill/fill stack slots start; + * - update env->subprog_info[*]->keep_fastcall_stack. + * + * The .fastcall_pattern and .fastcall_stack_off are used by + * check_fastcall_stack_contract() to check if every stack access to + * fastcall spill/fill stack slot originates from spill/fill + * instructions, members of fastcall patterns. + * + * If such condition holds true for a subprogram, fastcall patterns could + * be rewritten by remove_fastcall_spills_fills(). + * Otherwise bpf_fastcall patterns are not changed in the subprogram + * (code, presumably, generated by an older clang version). + * + * For example, it is *not* safe to remove spill/fill below: + * + * r1 = 1; + * *(u64 *)(r10 - 8) = r1; r1 = 1; + * call %[to_be_inlined] --> call %[to_be_inlined] + * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!! + * r0 = *(u64 *)(r10 - 8); r0 += r1; + * r0 += r1; exit; + * exit; + */ +static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env, + struct bpf_subprog_info *subprog, + int insn_idx, s16 lowest_off) +{ + struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; + struct bpf_insn *call = &env->prog->insnsi[insn_idx]; + const struct bpf_func_proto *fn; + u32 clobbered_regs_mask = ALL_CALLER_SAVED_REGS; + u32 expected_regs_mask; + bool can_be_inlined = false; + s16 off; + int i; + + if (bpf_helper_call(call)) { + if (get_helper_proto(env, call->imm, &fn) < 0) + /* error would be reported later */ + return; + clobbered_regs_mask = helper_fastcall_clobber_mask(fn); + can_be_inlined = fn->allow_fastcall && + (verifier_inlines_helper_call(env, call->imm) || + bpf_jit_inlines_helper_call(call->imm)); + } + + if (bpf_pseudo_kfunc_call(call)) { + struct bpf_kfunc_call_arg_meta meta; + int err; + + err = fetch_kfunc_meta(env, call, &meta, NULL); + if (err < 0) + /* error would be reported later */ + return; + + clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta); + can_be_inlined = is_fastcall_kfunc_call(&meta); + } + + if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS) + return; + + /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */ + expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS; + + /* match pairs of form: + * + * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0) + * ... + * call %[to_be_inlined] + * ... 
+ * rX = *(u64 *)(r10 - Y) + */ + for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) { + if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) + break; + stx = &insns[insn_idx - i]; + ldx = &insns[insn_idx + i]; + /* must be a stack spill/fill pair */ + if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) || + ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) || + stx->dst_reg != BPF_REG_10 || + ldx->src_reg != BPF_REG_10) + break; + /* must be a spill/fill for the same reg */ + if (stx->src_reg != ldx->dst_reg) + break; + /* must be one of the previously unseen registers */ + if ((BIT(stx->src_reg) & expected_regs_mask) == 0) + break; + /* must be a spill/fill for the same expected offset, + * no need to check offset alignment, BPF_DW stack access + * is always 8-byte aligned. + */ + if (stx->off != off || ldx->off != off) + break; + expected_regs_mask &= ~BIT(stx->src_reg); + env->insn_aux_data[insn_idx - i].fastcall_pattern = 1; + env->insn_aux_data[insn_idx + i].fastcall_pattern = 1; + } + if (i == 1) + return; + + /* Conditionally set 'fastcall_spills_num' to allow forward + * compatibility when more helper functions are marked as + * bpf_fastcall at compile time than current kernel supports, e.g: + * + * 1: *(u64 *)(r10 - 8) = r1 + * 2: call A ;; assume A is bpf_fastcall for current kernel + * 3: r1 = *(u64 *)(r10 - 8) + * 4: *(u64 *)(r10 - 8) = r1 + * 5: call B ;; assume B is not bpf_fastcall for current kernel + * 6: r1 = *(u64 *)(r10 - 8) + * + * There is no need to block bpf_fastcall rewrite for such program. + * Set 'fastcall_pattern' for both calls to keep check_fastcall_stack_contract() happy, + * don't set 'fastcall_spills_num' for call B so that remove_fastcall_spills_fills() + * does not remove spill/fill pair {4,6}. + */ + if (can_be_inlined) + env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; + else + subprog->keep_fastcall_stack = 1; + subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off); +} + +static int mark_fastcall_patterns(struct bpf_verifier_env *env) +{ + struct bpf_subprog_info *subprog = env->subprog_info; + struct bpf_insn *insn; + s16 lowest_off; + int s, i; + + for (s = 0; s < env->subprog_cnt; ++s, ++subprog) { + /* find lowest stack spill offset used in this subprog */ + lowest_off = 0; + for (i = subprog->start; i < (subprog + 1)->start; ++i) { + insn = env->prog->insnsi + i; + if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) || + insn->dst_reg != BPF_REG_10) + continue; + lowest_off = min(lowest_off, insn->off); + } + /* use this offset to find fastcall patterns */ + for (i = subprog->start; i < (subprog + 1)->start; ++i) { + insn = env->prog->insnsi + i; + if (insn->code != (BPF_JMP | BPF_CALL)) + continue; + mark_fastcall_pattern_for_call(env, subprog, i, lowest_off); + } + } + return 0; +} + /* Visits the instruction at index t and returns one of the following: * < 0 - an error occurred * DONE_EXPLORING - the instruction was fully explored @@ -16772,7 +17282,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, * * First verification path is [1-6]: * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; - * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark + * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark * r7 <= X, because r6 and r7 share same id. * Next verification path is [1-4, 6]. * @@ -17566,7 +18076,7 @@ hit: * the current state. */ if (is_jmp_point(env, env->insn_idx)) - err = err ? 
: push_jmp_history(env, cur, 0); + err = err ? : push_jmp_history(env, cur, 0, 0); err = err ? : propagate_precision(env, &sl->state); if (err) return err; @@ -17834,7 +18344,7 @@ static int do_check(struct bpf_verifier_env *env) } if (is_jmp_point(env, env->insn_idx)) { - err = push_jmp_history(env, state, 0); + err = push_jmp_history(env, state, 0, 0); if (err) return err; } @@ -18767,6 +19277,9 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) for (i = 0; i < insn_cnt; i++, insn++) { u8 code = insn->code; + if (tgt_idx <= i && i < tgt_idx + delta) + continue; + if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) || BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT) continue; @@ -19026,9 +19539,11 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env) return 0; } +static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0); + static int opt_remove_nops(struct bpf_verifier_env *env) { - const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); + const struct bpf_insn ja = NOP; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, err; @@ -19153,14 +19668,39 @@ apply_patch_buffer: */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { + struct bpf_subprog_info *subprogs = env->subprog_info; const struct bpf_verifier_ops *ops = env->ops; - int i, cnt, size, ctx_field_size, delta = 0; + int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0; const int insn_cnt = env->prog->len; - struct bpf_insn insn_buf[16], *insn; + struct bpf_insn *epilogue_buf = env->epilogue_buf; + struct bpf_insn *insn_buf = env->insn_buf; + struct bpf_insn *insn; u32 target_size, size_default, off; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; + int epilogue_idx = 0; + + if (ops->gen_epilogue) { + epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, + -(subprogs[0].stack_depth + 8)); + if (epilogue_cnt >= INSN_BUF_SIZE) { + verbose(env, "bpf verifier is misconfigured\n"); + return -EINVAL; + } else if (epilogue_cnt) { + /* Save the ARG_PTR_TO_CTX for the epilogue to use */ + cnt = 0; + subprogs[0].stack_depth += 8; + insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, + -subprogs[0].stack_depth); + insn_buf[cnt++] = env->prog->insnsi[0]; + new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + env->prog = new_prog; + delta += cnt - 1; + } + } if (ops->gen_prologue || env->seen_direct_write) { if (!ops->gen_prologue) { @@ -19169,7 +19709,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); - if (cnt >= ARRAY_SIZE(insn_buf)) { + if (cnt >= INSN_BUF_SIZE) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { @@ -19182,6 +19722,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } } + if (delta) + WARN_ON(adjust_jmp_off(env->prog, 0, delta)); + if (bpf_prog_is_offloaded(env->prog->aux)) return 0; @@ -19214,6 +19757,25 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); env->prog->aux->num_exentries++; continue; + } else if (insn->code == (BPF_JMP | BPF_EXIT) && + epilogue_cnt && + i + delta < subprogs[1].start) { + /* Generate epilogue for the main prog */ + if (epilogue_idx) { + /* jump back to the earlier generated epilogue */ + insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1); + cnt = 1; + } else { + memcpy(insn_buf, 
epilogue_buf, + epilogue_cnt * sizeof(*epilogue_buf)); + cnt = epilogue_cnt; + /* epilogue_idx cannot be 0. It must have at + * least one ctx ptr saving insn before the + * epilogue. + */ + epilogue_idx = i + delta; + } + goto patch_insn_buf; } else { continue; } @@ -19316,7 +19878,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) target_size = 0; cnt = convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); - if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || + if (cnt == 0 || cnt >= INSN_BUF_SIZE || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; @@ -19325,7 +19887,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) if (is_narrower_load && size < target_size) { u8 shift = bpf_ctx_narrow_access_offset( off, size, size_default) * 8; - if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { + if (shift && cnt + 1 >= INSN_BUF_SIZE) { verbose(env, "bpf verifier narrow ctx load misconfigured\n"); return -EINVAL; } @@ -19350,6 +19912,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->dst_reg, insn->dst_reg, size * 8, 0); +patch_insn_buf: new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; @@ -19870,7 +20433,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; - struct bpf_insn insn_buf[16]; + struct bpf_insn *insn_buf = env->insn_buf; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, ret, cnt, delta = 0, cur_subprog = 0; @@ -19913,13 +20476,46 @@ static int do_misc_fixups(struct bpf_verifier_env *env) /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */ insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code); - /* Make divide-by-zero exceptions impossible. */ + /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ + if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || + insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || + insn->code == (BPF_ALU | BPF_MOD | BPF_K) || + insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && + insn->off == 1 && insn->imm == -1) { + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; + bool isdiv = BPF_OP(insn->code) == BPF_DIV; + struct bpf_insn *patchlet; + struct bpf_insn chk_and_sdiv[] = { + BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | + BPF_NEG | BPF_K, insn->dst_reg, + 0, 0, 0), + }; + struct bpf_insn chk_and_smod[] = { + BPF_MOV32_IMM(insn->dst_reg, 0), + }; + + patchlet = isdiv ? chk_and_sdiv : chk_and_smod; + cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod); + + new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto next_insn; + } + + /* Make divide-by-zero and divide-by-minus-one exceptions impossible. 
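+ *
+ * Editor's note: the minus-one case matters for the signed division and
+ * modulo variants (insn->off == 1), because e.g. LLONG_MIN sdiv -1
+ * overflows and, on x86, raises the same hardware exception as a division
+ * by zero; the chk_and_sdiv/chk_and_smod patchlets below therefore check
+ * the divisor at runtime.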
*/ if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; bool isdiv = BPF_OP(insn->code) == BPF_DIV; + bool is_sdiv = isdiv && insn->off == 1; + bool is_smod = !isdiv && insn->off == 1; struct bpf_insn *patchlet; struct bpf_insn chk_and_div[] = { /* [R,W]x div 0 -> 0 */ @@ -19939,10 +20535,62 @@ static int do_misc_fixups(struct bpf_verifier_env *env) BPF_JMP_IMM(BPF_JA, 0, 0, 1), BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), }; + struct bpf_insn chk_and_sdiv[] = { + /* [R,W]x sdiv 0 -> 0 + * LLONG_MIN sdiv -1 -> LLONG_MIN + * INT_MIN sdiv -1 -> INT_MIN + */ + BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), + BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | + BPF_ADD | BPF_K, BPF_REG_AX, + 0, 0, 1), + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JGT | BPF_K, BPF_REG_AX, + 0, 4, 1), + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JEQ | BPF_K, BPF_REG_AX, + 0, 1, 0), + BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | + BPF_MOV | BPF_K, insn->dst_reg, + 0, 0, 0), + /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ + BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | + BPF_NEG | BPF_K, insn->dst_reg, + 0, 0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + *insn, + }; + struct bpf_insn chk_and_smod[] = { + /* [R,W]x mod 0 -> [R,W]x */ + /* [R,W]x mod -1 -> 0 */ + BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), + BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | + BPF_ADD | BPF_K, BPF_REG_AX, + 0, 0, 1), + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JGT | BPF_K, BPF_REG_AX, + 0, 3, 1), + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | + BPF_JEQ | BPF_K, BPF_REG_AX, + 0, 3 + (is64 ? 0 : 1), 1), + BPF_MOV32_IMM(insn->dst_reg, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + *insn, + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), + }; - patchlet = isdiv ? chk_and_div : chk_and_mod; - cnt = isdiv ? ARRAY_SIZE(chk_and_div) : - ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); + if (is_sdiv) { + patchlet = chk_and_sdiv; + cnt = ARRAY_SIZE(chk_and_sdiv); + } else if (is_smod) { + patchlet = chk_and_smod; + cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0); + } else { + patchlet = isdiv ? chk_and_div : chk_and_mod; + cnt = isdiv ? ARRAY_SIZE(chk_and_div) : + ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); + } new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) @@ -19989,7 +20637,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) { cnt = env->ops->gen_ld_abs(insn, insn_buf); - if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { + if (cnt == 0 || cnt >= INSN_BUF_SIZE) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } @@ -20282,7 +20930,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == -EOPNOTSUPP) goto patch_map_ops_generic; - if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { + if (cnt <= 0 || cnt >= INSN_BUF_SIZE) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } @@ -20384,7 +21032,7 @@ patch_map_ops_generic: #if defined(CONFIG_X86_64) && !defined(CONFIG_UML) /* Implement bpf_get_smp_processor_id() inline. 
*/ if (insn->imm == BPF_FUNC_get_smp_processor_id && - prog->jit_requested && bpf_jit_supports_percpu_insn()) { + verifier_inlines_helper_call(env, insn->imm)) { /* BPF_FUNC_get_smp_processor_id inlining is an * optimization, so if pcpu_hot.cpu_number is ever * changed in some incompatible and hard to support @@ -20642,7 +21290,7 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, int position, s32 stack_base, u32 callback_subprogno, - u32 *cnt) + u32 *total_cnt) { s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; @@ -20651,55 +21299,56 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, int reg_loop_cnt = BPF_REG_7; int reg_loop_ctx = BPF_REG_8; + struct bpf_insn *insn_buf = env->insn_buf; struct bpf_prog *new_prog; u32 callback_start; u32 call_insn_offset; s32 callback_offset; + u32 cnt = 0; /* This represents an inlined version of bpf_iter.c:bpf_loop, * be careful to modify this code in sync. */ - struct bpf_insn insn_buf[] = { - /* Return error and jump to the end of the patch if - * expected number of iterations is too big. - */ - BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2), - BPF_MOV32_IMM(BPF_REG_0, -E2BIG), - BPF_JMP_IMM(BPF_JA, 0, 0, 16), - /* spill R6, R7, R8 to use these as loop vars */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset), - /* initialize loop vars */ - BPF_MOV64_REG(reg_loop_max, BPF_REG_1), - BPF_MOV32_IMM(reg_loop_cnt, 0), - BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3), - /* loop header, - * if reg_loop_cnt >= reg_loop_max skip the loop body - */ - BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5), - /* callback call, - * correct callback offset would be set after patching - */ - BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt), - BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx), - BPF_CALL_REL(0), - /* increment loop counter */ - BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1), - /* jump to loop header if callback returned 0 */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), - /* return value of bpf_loop, - * set R0 to the number of iterations - */ - BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt), - /* restore original values of R6, R7, R8 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset), - BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset), - }; - *cnt = ARRAY_SIZE(insn_buf); - new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); + /* Return error and jump to the end of the patch if + * expected number of iterations is too big. 
+ */ + insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2); + insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG); + insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16); + /* spill R6, R7, R8 to use these as loop vars */ + insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset); + insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset); + insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset); + /* initialize loop vars */ + insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1); + insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0); + insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3); + /* loop header, + * if reg_loop_cnt >= reg_loop_max skip the loop body + */ + insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5); + /* callback call, + * correct callback offset would be set after patching + */ + insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt); + insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx); + insn_buf[cnt++] = BPF_CALL_REL(0); + /* increment loop counter */ + insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1); + /* jump to loop header if callback returned 0 */ + insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6); + /* return value of bpf_loop, + * set R0 to the number of iterations + */ + insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt); + /* restore original values of R6, R7, R8 */ + insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset); + insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset); + insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset); + + *total_cnt = cnt; + new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt); if (!new_prog) return new_prog; @@ -20774,6 +21423,40 @@ static int optimize_bpf_loop(struct bpf_verifier_env *env) return 0; } +/* Remove unnecessary spill/fill pairs, members of fastcall pattern, + * adjust subprograms stack depth when possible. 
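+ *
+ * Editor's note: the removed spill/fill instructions are overwritten with
+ * NOPs (BPF_JA +0) that opt_remove_nops() strips later, and when the
+ * subprogram does not need to keep the fastcall stack slots its
+ * stack_depth is reduced to -fastcall_stack_off.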
+ */ +static int remove_fastcall_spills_fills(struct bpf_verifier_env *env) +{ + struct bpf_subprog_info *subprog = env->subprog_info; + struct bpf_insn_aux_data *aux = env->insn_aux_data; + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; + u32 spills_num; + bool modified = false; + int i, j; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (aux[i].fastcall_spills_num > 0) { + spills_num = aux[i].fastcall_spills_num; + /* NOPs would be removed by opt_remove_nops() */ + for (j = 1; j <= spills_num; ++j) { + *(insn - j) = NOP; + *(insn + j) = NOP; + } + modified = true; + } + if ((subprog + 1)->start == i + 1) { + if (modified && !subprog->keep_fastcall_stack) + subprog->stack_depth = -subprog->fastcall_stack_off; + subprog++; + modified = false; + } + } + + return 0; +} + static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; @@ -21047,6 +21730,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) u32 btf_id, member_idx; struct btf *btf; const char *mname; + int err; if (!prog->gpl_compatible) { verbose(env, "struct ops programs must have a GPL compatible license\n"); @@ -21094,8 +21778,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) return -EINVAL; } + err = bpf_struct_ops_supported(st_ops, __btf_member_bit_offset(t, member) / 8); + if (err) { + verbose(env, "attach to unsupported member %s of struct %s\n", + mname, st_ops->name); + return err; + } + if (st_ops->check_member) { - int err = st_ops->check_member(t, member, prog); + err = st_ops->check_member(t, member, prog); if (err) { verbose(env, "attach to unsupported member %s of struct %s\n", @@ -21706,6 +22397,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (ret < 0) goto skip_full_check; + ret = mark_fastcall_patterns(env); + if (ret < 0) + goto skip_full_check; + ret = do_check_main(env); ret = ret ?: do_check_subprogs(env); @@ -21715,6 +22410,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 skip_full_check: kvfree(env->explored_states); + /* might decrease stack depth, keep it before passes that + * allocate additional slots. 
+ */ + if (ret == 0) + ret = remove_fastcall_spills_fills(env); + if (ret == 0) ret = check_max_stack_depth(env); diff --git a/kernel/events/core.c b/kernel/events/core.c index 4f03eb908e7f..0016f5ff9ba2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8964,7 +8964,7 @@ got_name: mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; if (atomic_read(&nr_build_id_events)) - build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); + build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size); perf_iterate_sb(perf_event_mmap_output, mmap_event, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index ac0a01cc8634..a582cd25ca87 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -24,7 +24,6 @@ #include <linux/key.h> #include <linux/verification.h> #include <linux/namei.h> -#include <linux/fileattr.h> #include <net/bpf_sk_storage.h> @@ -798,29 +797,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = { .ret_btf_id = &bpf_task_pt_regs_ids[0], }; -BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) -{ - struct bpf_array *array = container_of(map, struct bpf_array, map); - struct cgroup *cgrp; - - if (unlikely(idx >= array->map.max_entries)) - return -E2BIG; - - cgrp = READ_ONCE(array->ptrs[idx]); - if (unlikely(!cgrp)) - return -EAGAIN; - - return task_under_cgroup_hierarchy(current, cgrp); -} - -static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { - .func = bpf_current_task_under_cgroup, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_ANYTHING, -}; - struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; @@ -1226,7 +1202,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_LONG, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u64), }; BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) @@ -1242,7 +1219,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = { .func = get_func_ret, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_LONG, + .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg2_size = sizeof(u64), }; BPF_CALL_1(get_func_arg_cnt, void *, ctx) @@ -1439,73 +1417,6 @@ static int __init bpf_key_sig_kfuncs_init(void) late_initcall(bpf_key_sig_kfuncs_init); #endif /* CONFIG_KEYS */ -/* filesystem kfuncs */ -__bpf_kfunc_start_defs(); - -/** - * bpf_get_file_xattr - get xattr of a file - * @file: file to get xattr from - * @name__str: name of the xattr - * @value_p: output buffer of the xattr value - * - * Get xattr *name__str* of *file* and store the output in *value_ptr*. - * - * For security reasons, only *name__str* with prefix "user." is allowed. - * - * Return: 0 on success, a negative value on error. 
- */ -__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str, - struct bpf_dynptr *value_p) -{ - struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p; - struct dentry *dentry; - u32 value_len; - void *value; - int ret; - - if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) - return -EPERM; - - value_len = __bpf_dynptr_size(value_ptr); - value = __bpf_dynptr_data_rw(value_ptr, value_len); - if (!value) - return -EINVAL; - - dentry = file_dentry(file); - ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ); - if (ret) - return ret; - return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len); -} - -__bpf_kfunc_end_defs(); - -BTF_KFUNCS_START(fs_kfunc_set_ids) -BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS) -BTF_KFUNCS_END(fs_kfunc_set_ids) - -static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id) -{ - if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id)) - return 0; - - /* Only allow to attach from LSM hooks, to avoid recursion */ - return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0; -} - -static const struct btf_kfunc_id_set bpf_fs_kfunc_set = { - .owner = THIS_MODULE, - .set = &fs_kfunc_set_ids, - .filter = bpf_get_file_xattr_filter, -}; - -static int __init bpf_fs_kfuncs_init(void) -{ - return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set); -} - -late_initcall(bpf_fs_kfuncs_init); - static const struct bpf_func_proto * bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1548,8 +1459,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_numa_node_id_proto; case BPF_FUNC_perf_event_read: return &bpf_perf_event_read_proto; - case BPF_FUNC_current_task_under_cgroup: - return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_probe_write_user: @@ -1578,6 +1487,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_cgrp_storage_get_proto; case BPF_FUNC_cgrp_storage_delete: return &bpf_cgrp_storage_delete_proto; + case BPF_FUNC_current_task_under_cgroup: + return &bpf_current_task_under_cgroup_proto; #endif case BPF_FUNC_send_signal: return &bpf_send_signal_proto; @@ -1598,7 +1509,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_jiffies64: return &bpf_jiffies64_proto; case BPF_FUNC_get_task_stack: - return &bpf_get_task_stack_proto; + return prog->sleepable ? &bpf_get_task_stack_sleepable_proto + : &bpf_get_task_stack_proto; case BPF_FUNC_copy_from_user: return &bpf_copy_from_user_proto; case BPF_FUNC_copy_from_user_task: @@ -1654,7 +1566,7 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_stackid: return &bpf_get_stackid_proto; case BPF_FUNC_get_stack: - return &bpf_get_stack_proto; + return prog->sleepable ? 
&bpf_get_stack_sleepable_proto : &bpf_get_stack_proto; #ifdef CONFIG_BPF_KPROBE_OVERRIDE case BPF_FUNC_override_return: return &bpf_override_return_proto; @@ -3299,7 +3211,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, struct bpf_run_ctx *old_run_ctx; int err = 0; - if (link->task && current->mm != link->task->mm) + if (link->task && !same_thread_group(current, link->task)) return 0; if (sleepable) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 9c581d6da843..785733245ead 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -564,6 +564,7 @@ static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *re BUILD_BUG_ON(sizeof(param.ent) < sizeof(void *)); /* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. &param) */ + perf_fetch_caller_regs(regs); *(struct pt_regs **)&param = regs; param.syscall_nr = rec->nr; for (i = 0; i < sys_data->nb_args; i++) @@ -575,6 +576,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; + struct pt_regs *fake_regs; struct hlist_head *head; unsigned long args[6]; bool valid_prog_array; @@ -602,7 +604,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - rec = perf_trace_buf_alloc(size, NULL, &rctx); + rec = perf_trace_buf_alloc(size, &fake_regs, &rctx); if (!rec) return; @@ -611,7 +613,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args); if ((valid_prog_array && - !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || + !perf_call_bpf_enter(sys_data->enter_event, fake_regs, sys_data, rec)) || hlist_empty(head)) { perf_swevent_put_recursion_context(rctx); return; @@ -666,6 +668,7 @@ static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *reg } __aligned(8) param; /* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. 
&param) */ + perf_fetch_caller_regs(regs); *(struct pt_regs **)&param = regs; param.syscall_nr = rec->nr; param.ret = rec->ret; @@ -676,6 +679,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; + struct pt_regs *fake_regs; struct hlist_head *head; bool valid_prog_array; int syscall_nr; @@ -701,7 +705,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - rec = perf_trace_buf_alloc(size, NULL, &rctx); + rec = perf_trace_buf_alloc(size, &fake_regs, &rctx); if (!rec) return; @@ -709,7 +713,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) rec->ret = syscall_get_return_value(current, regs); if ((valid_prog_array && - !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) || + !perf_call_bpf_exit(sys_data->exit_event, fake_regs, rec)) || hlist_empty(head)) { perf_swevent_put_recursion_context(rctx); return; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 62fce4ce0b16..bc8faa4509e1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -379,13 +379,15 @@ config DEBUG_INFO_BTF depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST depends on BPF_SYSCALL - depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121 + depends on PAHOLE_VERSION >= 116 + depends on DEBUG_INFO_DWARF4 || PAHOLE_VERSION >= 121 # pahole uses elfutils, which does not have support for Hexagon relocations depends on !HEXAGON help Generate deduplicated BTF type information from DWARF debug info. - Turning this on expects presence of pahole tool, which will convert - DWARF type info into equivalent deduplicated BTF type info. + Turning this on requires pahole v1.16 or later (v1.21 or later to + support DWARF 5), which will convert DWARF type info into equivalent + deduplicated BTF type info. 
config PAHOLE_HAS_SPLIT_BTF def_bool PAHOLE_VERSION >= 119 diff --git a/lib/buildid.c b/lib/buildid.c index e02b5507418b..290641d92ac1 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -8,154 +8,302 @@ #define BUILD_ID 3 +#define MAX_PHDR_CNT 256 + +struct freader { + void *buf; + u32 buf_sz; + int err; + union { + struct { + struct file *file; + struct folio *folio; + void *addr; + loff_t folio_off; + bool may_fault; + }; + struct { + const char *data; + u64 data_sz; + }; + }; +}; + +static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz, + struct file *file, bool may_fault) +{ + memset(r, 0, sizeof(*r)); + r->buf = buf; + r->buf_sz = buf_sz; + r->file = file; + r->may_fault = may_fault; +} + +static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz) +{ + memset(r, 0, sizeof(*r)); + r->data = data; + r->data_sz = data_sz; +} + +static void freader_put_folio(struct freader *r) +{ + if (!r->folio) + return; + kunmap_local(r->addr); + folio_put(r->folio); + r->folio = NULL; +} + +static int freader_get_folio(struct freader *r, loff_t file_off) +{ + /* check if we can just reuse current folio */ + if (r->folio && file_off >= r->folio_off && + file_off < r->folio_off + folio_size(r->folio)) + return 0; + + freader_put_folio(r); + + r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT); + + /* if sleeping is allowed, wait for the page, if necessary */ + if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) { + filemap_invalidate_lock_shared(r->file->f_mapping); + r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT, + NULL, r->file); + filemap_invalidate_unlock_shared(r->file->f_mapping); + } + + if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) { + if (!IS_ERR(r->folio)) + folio_put(r->folio); + r->folio = NULL; + return -EFAULT; + } + + r->folio_off = folio_pos(r->folio); + r->addr = kmap_local_folio(r->folio, 0); + + return 0; +} + +static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz) +{ + size_t folio_sz; + + /* provided internal temporary buffer should be sized correctly */ + if (WARN_ON(r->buf && sz > r->buf_sz)) { + r->err = -E2BIG; + return NULL; + } + + if (unlikely(file_off + sz < file_off)) { + r->err = -EOVERFLOW; + return NULL; + } + + /* working with memory buffer is much more straightforward */ + if (!r->buf) { + if (file_off + sz > r->data_sz) { + r->err = -ERANGE; + return NULL; + } + return r->data + file_off; + } + + /* fetch or reuse folio for given file offset */ + r->err = freader_get_folio(r, file_off); + if (r->err) + return NULL; + + /* if requested data is crossing folio boundaries, we have to copy + * everything into our local buffer to keep a simple linear memory + * access interface + */ + folio_sz = folio_size(r->folio); + if (file_off + sz > r->folio_off + folio_sz) { + int part_sz = r->folio_off + folio_sz - file_off; + + /* copy the part that resides in the current folio */ + memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz); + + /* fetch next folio */ + r->err = freader_get_folio(r, r->folio_off + folio_sz); + if (r->err) + return NULL; + + /* copy the rest of requested data */ + memcpy(r->buf + part_sz, r->addr, sz - part_sz); + + return r->buf; + } + + /* if data fits in a single folio, just return direct pointer */ + return r->addr + (file_off - r->folio_off); +} + +static void freader_cleanup(struct freader *r) +{ + if (!r->buf) + return; /* non-file-backed mode */ + + freader_put_folio(r); +} + /* * Parse build 
id from the note segment. This logic can be shared between * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are * identical. */ -static int parse_build_id_buf(unsigned char *build_id, - __u32 *size, - const void *note_start, - Elf32_Word note_size) +static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size, + loff_t note_off, Elf32_Word note_size) { - Elf32_Word note_offs = 0, new_offs; + const char note_name[] = "GNU"; + const size_t note_name_sz = sizeof(note_name); + u32 build_id_off, new_off, note_end, name_sz, desc_sz; + const Elf32_Nhdr *nhdr; + const char *data; + + if (check_add_overflow(note_off, note_size, ¬e_end)) + return -EINVAL; - while (note_offs + sizeof(Elf32_Nhdr) < note_size) { - Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs); + while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) { + nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz); + if (!nhdr) + return r->err; + + name_sz = READ_ONCE(nhdr->n_namesz); + desc_sz = READ_ONCE(nhdr->n_descsz); + + new_off = note_off + sizeof(Elf32_Nhdr); + if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) || + check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) || + new_off > note_end) + break; if (nhdr->n_type == BUILD_ID && - nhdr->n_namesz == sizeof("GNU") && - !strcmp((char *)(nhdr + 1), "GNU") && - nhdr->n_descsz > 0 && - nhdr->n_descsz <= BUILD_ID_SIZE_MAX) { - memcpy(build_id, - note_start + note_offs + - ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), - nhdr->n_descsz); - memset(build_id + nhdr->n_descsz, 0, - BUILD_ID_SIZE_MAX - nhdr->n_descsz); + name_sz == note_name_sz && + memcmp(nhdr + 1, note_name, note_name_sz) == 0 && + desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) { + build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); + + /* freader_fetch() will invalidate nhdr pointer */ + data = freader_fetch(r, build_id_off, desc_sz); + if (!data) + return r->err; + + memcpy(build_id, data, desc_sz); + memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz); if (size) - *size = nhdr->n_descsz; + *size = desc_sz; return 0; } - new_offs = note_offs + sizeof(Elf32_Nhdr) + - ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4); - if (new_offs <= note_offs) /* overflow */ - break; - note_offs = new_offs; + + note_off = new_off; } return -EINVAL; } -static inline int parse_build_id(const void *page_addr, - unsigned char *build_id, - __u32 *size, - const void *note_start, - Elf32_Word note_size) +/* Parse build ID from 32-bit ELF */ +static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size) { - /* check for overflow */ - if (note_start < page_addr || note_start + note_size < note_start) - return -EINVAL; + const Elf32_Ehdr *ehdr; + const Elf32_Phdr *phdr; + __u32 phnum, phoff, i; - /* only supports note that fits in the first page */ - if (note_start + note_size > page_addr + PAGE_SIZE) - return -EINVAL; + ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr)); + if (!ehdr) + return r->err; - return parse_build_id_buf(build_id, size, note_start, note_size); -} + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ + phnum = READ_ONCE(ehdr->e_phnum); + phoff = READ_ONCE(ehdr->e_phoff); -/* Parse build ID from 32-bit ELF */ -static int get_build_id_32(const void *page_addr, unsigned char *build_id, - __u32 *size) -{ - Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr; - Elf32_Phdr *phdr; - int i; - - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start 
immediately after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf32_Ehdr)) - return -EINVAL; - /* only supports phdr that fits in one page */ - if (ehdr->e_phnum > - (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; + + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf32_Phdr) < phoff) return -EINVAL; - phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); + for (i = 0; i < phnum; ++i) { + phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr)); + if (!phdr) + return r->err; - for (i = 0; i < ehdr->e_phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + phdr[i].p_offset, - phdr[i].p_filesz)) + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } return -EINVAL; } /* Parse build ID from 64-bit ELF */ -static int get_build_id_64(const void *page_addr, unsigned char *build_id, - __u32 *size) +static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size) { - Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr; - Elf64_Phdr *phdr; - int i; - - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start immediately after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf64_Ehdr)) - return -EINVAL; - /* only supports phdr that fits in one page */ - if (ehdr->e_phnum > - (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) + const Elf64_Ehdr *ehdr; + const Elf64_Phdr *phdr; + __u32 phnum, i; + __u64 phoff; + + ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr)); + if (!ehdr) + return r->err; + + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ + phnum = READ_ONCE(ehdr->e_phnum); + phoff = READ_ONCE(ehdr->e_phoff); + + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; + + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf64_Phdr) < phoff) return -EINVAL; - phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); + for (i = 0; i < phnum; ++i) { + phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr)); + if (!phdr) + return r->err; - for (i = 0; i < ehdr->e_phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + phdr[i].p_offset, - phdr[i].p_filesz)) + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } + return -EINVAL; } -/* - * Parse build ID of ELF file mapped to vma - * @vma: vma object - * @build_id: buffer to store build id, at least BUILD_ID_SIZE long - * @size: returns actual build id size in case of success - * - * Return: 0 on success, -EINVAL otherwise - */ -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size) +/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */ +#define MAX_FREADER_BUF_SZ 64 + +static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, + __u32 *size, bool may_fault) { - Elf32_Ehdr *ehdr; - struct page *page; - void *page_addr; + const Elf32_Ehdr *ehdr; + struct freader r; + char buf[MAX_FREADER_BUF_SZ]; int ret; /* only works for page backed storage */ if (!vma->vm_file) return -EINVAL; - page = find_get_page(vma->vm_file->f_mapping, 0); - if 
(!page) - return -EFAULT; /* page not mapped */ + freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault); + + /* fetch first 18 bytes of ELF header for checks */ + ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type)); + if (!ehdr) { + ret = r.err; + goto out; + } ret = -EINVAL; - page_addr = kmap_local_page(page); - ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) @@ -166,15 +314,46 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, goto out; if (ehdr->e_ident[EI_CLASS] == ELFCLASS32) - ret = get_build_id_32(page_addr, build_id, size); + ret = get_build_id_32(&r, build_id, size); else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) - ret = get_build_id_64(page_addr, build_id, size); + ret = get_build_id_64(&r, build_id, size); out: - kunmap_local(page_addr); - put_page(page); + freader_cleanup(&r); return ret; } +/* + * Parse build ID of ELF file mapped to vma + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes no page fault can be taken, so if relevant portions of ELF file are + * not already paged in, fetching of build ID fails. + * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(vma, build_id, size, false /* !may_fault */); +} + +/* + * Parse build ID of ELF file mapped to VMA + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes faultable context and can cause page faults to bring in file data + * into page cache. + * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(vma, build_id, size, true /* may_fault */); +} + /** * build_id_parse_buf - Get build ID from a buffer * @buf: ELF note section(s) to parse @@ -185,7 +364,15 @@ out: */ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) { - return parse_build_id_buf(build_id, NULL, buf, buf_size); + struct freader r; + int err; + + freader_init_from_mem(&r, buf, buf_size); + + err = parse_build_id(&r, build_id, NULL, 0, buf_size); + + freader_cleanup(&r); + return err; } #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO) diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 3ea52b05adfb..f71f67c6896b 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -115,7 +115,7 @@ static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_ offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no); info = find_ctx_arg_info(prog->aux, offset); - if (info && (info->reg_type & PTR_MAYBE_NULL)) + if (info && type_may_be_null(info->reg_type)) continue; return -EINVAL; diff --git a/net/core/filter.c b/net/core/filter.c index 0f4d9f3b206e..cd3524cb326b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1266,8 +1266,8 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. 
*/ - old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), - GFP_KERNEL | __GFP_NOWARN); + old_prog = kmemdup_array(fp->insns, old_len, sizeof(struct sock_filter), + GFP_KERNEL | __GFP_NOWARN); if (!old_prog) { err = -ENOMEM; goto out_err; @@ -6280,20 +6280,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; struct net_device *dev = skb->dev; int skb_len, dev_len; - int mtu; + int mtu = 0; - if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) - return -EINVAL; + if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) { + ret = -EINVAL; + goto out; + } - if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) - return -EINVAL; + if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) { + ret = -EINVAL; + goto out; + } dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + } mtu = READ_ONCE(dev->mtu); - dev_len = mtu + dev->hard_header_len; /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6311,15 +6316,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, */ if (skb_is_gso(skb)) { ret = BPF_MTU_CHK_RET_SUCCESS; - if (flags & BPF_MTU_CHK_SEGS && !skb_gso_validate_network_len(skb, mtu)) ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; } out: - /* BPF verifier guarantees valid pointer */ *mtu_len = mtu; - return ret; } @@ -6329,19 +6331,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, struct net_device *dev = xdp->rxq->dev; int xdp_len = xdp->data_end - xdp->data; int ret = BPF_MTU_CHK_RET_SUCCESS; - int mtu, dev_len; + int mtu = 0, dev_len; /* XDP variant doesn't support multi-buffer segment check (yet) */ - if (unlikely(flags)) - return -EINVAL; + if (unlikely(flags)) { + ret = -EINVAL; + goto out; + } dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + } mtu = READ_ONCE(dev->mtu); - - /* Add L2-header as dev MTU is L3 size */ dev_len = mtu + dev->hard_header_len; /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6351,10 +6355,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, xdp_len += len_diff; /* minus result pass check */ if (xdp_len > dev_len) ret = BPF_MTU_CHK_RET_FRAG_NEEDED; - - /* BPF verifier guarantees valid pointer */ +out: *mtu_len = mtu; - return ret; } @@ -6364,7 +6366,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6375,7 +6378,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -8597,13 +8601,16 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (off + size > offsetofend(struct __sk_buff, cb[4])) return false; break; + case bpf_ctx_range(struct __sk_buff, data): + case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, data_end): + if (info->is_ldsx || size != size_default) + return false; + break; case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): 
case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): - case bpf_ctx_range(struct __sk_buff, data): - case bpf_ctx_range(struct __sk_buff, data_meta): - case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; break; @@ -9047,6 +9054,14 @@ static bool xdp_is_valid_access(int off, int size, } } return false; + } else { + switch (off) { + case offsetof(struct xdp_md, data_meta): + case offsetof(struct xdp_md, data): + case offsetof(struct xdp_md, data_end): + if (info->is_ldsx) + return false; + } } switch (off) { @@ -9372,12 +9387,12 @@ static bool flow_dissector_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, data): - if (size != size_default) + if (info->is_ldsx || size != size_default) return false; info->reg_type = PTR_TO_PACKET; return true; case bpf_ctx_range(struct __sk_buff, data_end): - if (size != size_default) + if (info->is_ldsx || size != size_default) return false; info->reg_type = PTR_TO_PACKET_END; return true; diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 3f88d0961e5b..554804774628 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -14,10 +14,6 @@ /* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */ static struct bpf_struct_ops bpf_tcp_congestion_ops; -static u32 unsupported_ops[] = { - offsetof(struct tcp_congestion_ops, get_info), -}; - static const struct btf_type *tcp_sock_type; static u32 tcp_sock_id, sock_id; static const struct btf_type *tcp_congestion_ops_type; @@ -45,18 +41,6 @@ static int bpf_tcp_ca_init(struct btf *btf) return 0; } -static bool is_unsupported(u32 member_offset) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) { - if (member_offset == unsupported_ops[i]) - return true; - } - - return false; -} - static bool bpf_tcp_ca_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, @@ -251,15 +235,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t, return 0; } -static int bpf_tcp_ca_check_member(const struct btf_type *t, - const struct btf_member *member, - const struct bpf_prog *prog) -{ - if (is_unsupported(__btf_member_bit_offset(t, member) / 8)) - return -ENOTSUPP; - return 0; -} - static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link) { return tcp_register_congestion_control(kdata); @@ -354,7 +329,6 @@ static struct bpf_struct_ops bpf_tcp_congestion_ops = { .reg = bpf_tcp_ca_reg, .unreg = bpf_tcp_ca_unreg, .update = bpf_tcp_ca_update, - .check_member = bpf_tcp_ca_check_member, .init_member = bpf_tcp_ca_init_member, .init = bpf_tcp_ca_init, .validate = bpf_tcp_ca_validate, diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 7e16336044b2..1140b2a120ca 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1320,14 +1320,6 @@ struct xdp_umem_reg_v1 { __u32 headroom; }; -struct xdp_umem_reg_v2 { - __u64 addr; /* Start of packet data area */ - __u64 len; /* Length of packet data area */ - __u32 chunk_size; - __u32 headroom; - __u32 flags; -}; - static int xsk_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -1371,10 +1363,19 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(struct xdp_umem_reg_v1)) return -EINVAL; - else if (optlen < sizeof(struct xdp_umem_reg_v2)) - mr_size = sizeof(struct xdp_umem_reg_v1); else if 
(optlen < sizeof(mr)) - mr_size = sizeof(struct xdp_umem_reg_v2); + mr_size = sizeof(struct xdp_umem_reg_v1); + + BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg)); + + /* Make sure the last field of the struct doesn't have + * uninitialized padding. All padding has to be explicit + * and has to be set to zero by the userspace to make + * struct xdp_umem_reg extensible in the future. + */ + BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) + + sizeof_field(struct xdp_umem_reg, tx_metadata_len) != + sizeof(struct xdp_umem_reg)); if (copy_from_sockptr(&mr, optval, mr_size)) return -EFAULT; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 3e003dd6bea0..7afe040cf43b 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -13,7 +13,6 @@ tprogs-y += sockex1 tprogs-y += sockex2 tprogs-y += sockex3 tprogs-y += tracex1 -tprogs-y += tracex2 tprogs-y += tracex3 tprogs-y += tracex4 tprogs-y += tracex5 @@ -63,7 +62,6 @@ sockex1-objs := sockex1_user.o sockex2-objs := sockex2_user.o sockex3-objs := sockex3_user.o tracex1-objs := tracex1_user.o $(TRACE_HELPERS) -tracex2-objs := tracex2_user.o tracex3-objs := tracex3_user.o tracex4-objs := tracex4_user.o tracex5-objs := tracex5_user.o $(TRACE_HELPERS) @@ -105,7 +103,6 @@ always-y += sockex1_kern.o always-y += sockex2_kern.o always-y += sockex3_kern.o always-y += tracex1.bpf.o -always-y += tracex2.bpf.o always-y += tracex3.bpf.o always-y += tracex4.bpf.o always-y += tracex5.bpf.o @@ -169,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic endif endif +ifeq ($(ARCH), x86) +BPF_EXTRA_CFLAGS += -fcf-protection +endif + TPROGS_CFLAGS += -Wall -O2 TPROGS_CFLAGS += -Wmissing-prototypes TPROGS_CFLAGS += -Wstrict-prototypes @@ -405,7 +406,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ - -fno-asynchronous-unwind-tables -fcf-protection \ + -fno-asynchronous-unwind-tables \ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ diff --git a/samples/bpf/tracex2.bpf.c b/samples/bpf/tracex2.bpf.c deleted file mode 100644 index 0a5c75b367be..000000000000 --- a/samples/bpf/tracex2.bpf.c +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ -#include "vmlinux.h" -#include <linux/version.h> -#include <bpf/bpf_helpers.h> -#include <bpf/bpf_tracing.h> -#include <bpf/bpf_core_read.h> - -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __type(key, long); - __type(value, long); - __uint(max_entries, 1024); -} my_map SEC(".maps"); - -/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe - * example will no longer be meaningful - */ -SEC("kprobe/kfree_skb_reason") -int bpf_prog2(struct pt_regs *ctx) -{ - long loc = 0; - long init_val = 1; - long *value; - - /* read ip of kfree_skb_reason caller. 
- * non-portable version of __builtin_return_address(0) - */ - BPF_KPROBE_READ_RET_IP(loc, ctx); - - value = bpf_map_lookup_elem(&my_map, &loc); - if (value) - *value += 1; - else - bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY); - return 0; -} - -static unsigned int log2(unsigned int v) -{ - unsigned int r; - unsigned int shift; - - r = (v > 0xFFFF) << 4; v >>= r; - shift = (v > 0xFF) << 3; v >>= shift; r |= shift; - shift = (v > 0xF) << 2; v >>= shift; r |= shift; - shift = (v > 0x3) << 1; v >>= shift; r |= shift; - r |= (v >> 1); - return r; -} - -static unsigned int log2l(unsigned long v) -{ - unsigned int hi = v >> 32; - if (hi) - return log2(hi) + 32; - else - return log2(v); -} - -struct hist_key { - char comm[16]; - u64 pid_tgid; - u64 uid_gid; - u64 index; -}; - -struct { - __uint(type, BPF_MAP_TYPE_PERCPU_HASH); - __uint(key_size, sizeof(struct hist_key)); - __uint(value_size, sizeof(long)); - __uint(max_entries, 1024); -} my_hist_map SEC(".maps"); - -SEC("ksyscall/write") -int BPF_KSYSCALL(bpf_prog3, unsigned int fd, const char *buf, size_t count) -{ - long init_val = 1; - long *value; - struct hist_key key; - - key.index = log2l(count); - key.pid_tgid = bpf_get_current_pid_tgid(); - key.uid_gid = bpf_get_current_uid_gid(); - bpf_get_current_comm(&key.comm, sizeof(key.comm)); - - value = bpf_map_lookup_elem(&my_hist_map, &key); - if (value) - __sync_fetch_and_add(value, 1); - else - bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY); - return 0; -} -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c deleted file mode 100644 index 2131f1648cf1..000000000000 --- a/samples/bpf/tracex2_user.c +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <stdio.h> -#include <unistd.h> -#include <stdlib.h> -#include <signal.h> -#include <string.h> - -#include <bpf/bpf.h> -#include <bpf/libbpf.h> -#include "bpf_util.h" - -#define MAX_INDEX 64 -#define MAX_STARS 38 - -/* my_map, my_hist_map */ -static int map_fd[2]; - -static void stars(char *str, long val, long max, int width) -{ - int i; - - for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++) - str[i] = '*'; - if (val > max) - str[i - 1] = '+'; - str[i] = '\0'; -} - -struct task { - char comm[16]; - __u64 pid_tgid; - __u64 uid_gid; -}; - -struct hist_key { - struct task t; - __u32 index; -}; - -#define SIZE sizeof(struct task) - -static void print_hist_for_pid(int fd, void *task) -{ - unsigned int nr_cpus = bpf_num_possible_cpus(); - struct hist_key key = {}, next_key; - long values[nr_cpus]; - char starstr[MAX_STARS]; - long value; - long data[MAX_INDEX] = {}; - int max_ind = -1; - long max_value = 0; - int i, ind; - - while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { - if (memcmp(&next_key, task, SIZE)) { - key = next_key; - continue; - } - bpf_map_lookup_elem(fd, &next_key, values); - value = 0; - for (i = 0; i < nr_cpus; i++) - value += values[i]; - ind = next_key.index; - data[ind] = value; - if (value && ind > max_ind) - max_ind = ind; - if (value > max_value) - max_value = value; - key = next_key; - } - - printf(" syscall write() stats\n"); - printf(" byte_size : count distribution\n"); - for (i = 1; i <= max_ind + 1; i++) { - stars(starstr, data[i - 1], max_value, MAX_STARS); - printf("%8ld -> %-8ld : %-8ld |%-*s|\n", - (1l << i) >> 1, (1l << i) - 1, data[i - 1], - MAX_STARS, starstr); - } -} - -static void print_hist(int fd) -{ - struct hist_key key = {}, next_key; - 
static struct task tasks[1024]; - int task_cnt = 0; - int i; - - while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { - int found = 0; - - for (i = 0; i < task_cnt; i++) - if (memcmp(&tasks[i], &next_key, SIZE) == 0) - found = 1; - if (!found) - memcpy(&tasks[task_cnt++], &next_key, SIZE); - key = next_key; - } - - for (i = 0; i < task_cnt; i++) { - printf("\npid %d cmd %s uid %d\n", - (__u32) tasks[i].pid_tgid, - tasks[i].comm, - (__u32) tasks[i].uid_gid); - print_hist_for_pid(fd, &tasks[i]); - } - -} - -static void int_exit(int sig) -{ - print_hist(map_fd[1]); - exit(0); -} - -int main(int ac, char **argv) -{ - long key, next_key, value; - struct bpf_link *links[2]; - struct bpf_program *prog; - struct bpf_object *obj; - char filename[256]; - int i, j = 0; - FILE *f; - - snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) { - fprintf(stderr, "ERROR: opening BPF object file failed\n"); - return 0; - } - - /* load BPF program */ - if (bpf_object__load(obj)) { - fprintf(stderr, "ERROR: loading BPF object file failed\n"); - goto cleanup; - } - - map_fd[0] = bpf_object__find_map_fd_by_name(obj, "my_map"); - map_fd[1] = bpf_object__find_map_fd_by_name(obj, "my_hist_map"); - if (map_fd[0] < 0 || map_fd[1] < 0) { - fprintf(stderr, "ERROR: finding a map in obj file failed\n"); - goto cleanup; - } - - signal(SIGINT, int_exit); - signal(SIGTERM, int_exit); - - /* start 'ping' in the background to have some kfree_skb_reason - * events */ - f = popen("ping -4 -c5 localhost", "r"); - (void) f; - - /* start 'dd' in the background to have plenty of 'write' syscalls */ - f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r"); - (void) f; - - bpf_object__for_each_program(prog, obj) { - links[j] = bpf_program__attach(prog); - if (libbpf_get_error(links[j])) { - fprintf(stderr, "ERROR: bpf_program__attach failed\n"); - links[j] = NULL; - goto cleanup; - } - j++; - } - - for (i = 0; i < 5; i++) { - key = 0; - while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) { - bpf_map_lookup_elem(map_fd[0], &next_key, &value); - printf("location 0x%lx count %ld\n", next_key, value); - key = next_key; - } - if (key) - printf("\n"); - sleep(1); - } - print_hist(map_fd[1]); - -cleanup: - for (j--; j >= 0; j--) - bpf_link__destroy(links[j]); - - bpf_object__close(obj); - return 0; -} diff --git a/samples/bpf/tracex4.bpf.c b/samples/bpf/tracex4.bpf.c index ca826750901a..d786492fd926 100644 --- a/samples/bpf/tracex4.bpf.c +++ b/samples/bpf/tracex4.bpf.c @@ -33,13 +33,13 @@ int bpf_prog1(struct pt_regs *ctx) return 0; } -SEC("kretprobe/kmem_cache_alloc_node") +SEC("kretprobe/kmem_cache_alloc_node_noprof") int bpf_prog2(struct pt_regs *ctx) { long ptr = PT_REGS_RC(ctx); long ip = 0; - /* get ip address of kmem_cache_alloc_node() caller */ + /* get ip address of kmem_cache_alloc_node_noprof() caller */ BPF_KRETPROBE_READ_RET_IP(ip, ctx); struct pair v = { diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 070a319140e8..8445b1390126 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -107,20 +107,8 @@ vmlinux_link() # ${1} - vmlinux image gen_btf() { - local pahole_ver local btf_data=${1}.btf.o - if ! 
[ -x "$(command -v ${PAHOLE})" ]; then - echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" - return 1 - fi - - pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/') - if [ "${pahole_ver}" -lt "116" ]; then - echo >&2 "BTF: ${1}: pahole version $(${PAHOLE} --version) is too old, need at least v1.16" - return 1 - fi - info BTF "${btf_data}" LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1} @@ -284,7 +272,7 @@ strip_debug= vmlinux_link vmlinux # fill in BTF IDs -if is_enabled CONFIG_DEBUG_INFO_BTF && is_enabled CONFIG_BPF; then +if is_enabled CONFIG_DEBUG_INFO_BTF; then info BTFIDS vmlinux ${RESOLVE_BTFIDS} vmlinux fi diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c index 57b9ffd53c98..3663aec7bcbd 100644 --- a/security/bpf/hooks.c +++ b/security/bpf/hooks.c @@ -31,7 +31,6 @@ static int __init bpf_lsm_init(void) struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct bpf_storage_blob), - .lbs_task = sizeof(struct bpf_storage_blob), }; DEFINE_LSM(bpf) = { diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst index c768e6d4ae09..ca860fd97d8d 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst @@ -104,7 +104,7 @@ bpftool gen skeleton *FILE* - **example__load**. This function creates maps, loads and verifies BPF programs, initializes - global data maps. It corresponds to libppf's **bpf_object__load**\ () + global data maps. It corresponds to libbpf's **bpf_object__load**\ () API. - **example__open_and_load** combines **example__open** and @@ -172,7 +172,7 @@ bpftool gen min_core_btf *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] CO-RE based application, turning the application portable to different kernel versions. - Check examples bellow for more information how to use it. + Check examples below for more information on how to use it. bpftool gen help Print short help message. diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 348812881297..a9ed8992800f 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -29,7 +29,7 @@ NET COMMANDS | **bpftool** **net help** | | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } -| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** } +| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** | **tcx_ingress** | **tcx_egress** } DESCRIPTION =========== @@ -69,6 +69,8 @@ bpftool net attach *ATTACH_TYPE* *PROG* dev *NAME* [ overwrite ] **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb; **xdpdrv** - Native XDP. runs earliest point in driver's receive path; **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; + **tcx_ingress** - Ingress TCX. runs on ingress net traffic; + **tcx_egress** - Egress TCX. 
runs on egress net traffic; bpftool net detach *ATTACH_TYPE* dev *NAME* Detach bpf program attached to network interface *NAME* with type specified @@ -178,3 +180,23 @@ EXAMPLES :: xdp: + +| +| **# bpftool net attach tcx_ingress name tc_prog dev lo** +| **# bpftool net** +| + +:: + + tc: + lo(1) tcx/ingress tc_prog prog_id 29 + +| +| **# bpftool net attach tcx_ingress name tc_prog dev lo** +| **# bpftool net detach tcx_ingress dev lo** +| **# bpftool net** +| + +:: + + tc: diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index be99d49b8714..0c541498c301 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -1079,7 +1079,7 @@ _bpftool() esac ;; net) - local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload' + local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload tcx_ingress tcx_egress' case $command in show|list) [[ $prev != "$command" ]] && return 0 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 6789c7a4d5ca..7d2af1ff3c8d 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -50,6 +50,7 @@ struct sort_datum { int type_rank; const char *sort_name; const char *own_name; + __u64 disambig_hash; }; static const char *btf_int_enc_str(__u8 encoding) @@ -561,9 +562,10 @@ static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool f case BTF_KIND_ENUM64: { int name_off = t->name_off; - /* Use name of the first element for anonymous enums if allowed */ - if (!from_ref && !t->name_off && btf_vlen(t)) - name_off = btf_enum(t)->name_off; + if (!from_ref && !name_off && btf_vlen(t)) + name_off = btf_kind(t) == BTF_KIND_ENUM64 ? + btf_enum64(t)->name_off : + btf_enum(t)->name_off; return btf__name_by_offset(btf, name_off); } @@ -583,20 +585,88 @@ static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool f return NULL; } +static __u64 hasher(__u64 hash, __u64 val) +{ + return hash * 31 + val; +} + +static __u64 btf_name_hasher(__u64 hash, const struct btf *btf, __u32 name_off) +{ + if (!name_off) + return hash; + + return hasher(hash, str_hash(btf__name_by_offset(btf, name_off))); +} + +static __u64 btf_type_disambig_hash(const struct btf *btf, __u32 id, bool include_members) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + int i; + size_t hash = 0; + + hash = btf_name_hasher(hash, btf, t->name_off); + + switch (btf_kind(t)) { + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + for (i = 0; i < btf_vlen(t); i++) { + __u32 name_off = btf_is_enum(t) ? 
+ btf_enum(t)[i].name_off : + btf_enum64(t)[i].name_off; + + hash = btf_name_hasher(hash, btf, name_off); + } + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + if (!include_members) + break; + for (i = 0; i < btf_vlen(t); i++) { + const struct btf_member *m = btf_members(t) + i; + + hash = btf_name_hasher(hash, btf, m->name_off); + /* resolve field type's name and hash it as well */ + hash = hasher(hash, btf_type_disambig_hash(btf, m->type, false)); + } + break; + case BTF_KIND_TYPE_TAG: + case BTF_KIND_CONST: + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPEDEF: + case BTF_KIND_DECL_TAG: + hash = hasher(hash, btf_type_disambig_hash(btf, t->type, include_members)); + break; + case BTF_KIND_ARRAY: { + struct btf_array *arr = btf_array(t); + + hash = hasher(hash, arr->nelems); + hash = hasher(hash, btf_type_disambig_hash(btf, arr->type, include_members)); + break; + } + default: + break; + } + return hash; +} + static int btf_type_compare(const void *left, const void *right) { const struct sort_datum *d1 = (const struct sort_datum *)left; const struct sort_datum *d2 = (const struct sort_datum *)right; int r; - if (d1->type_rank != d2->type_rank) - return d1->type_rank < d2->type_rank ? -1 : 1; - - r = strcmp(d1->sort_name, d2->sort_name); + r = d1->type_rank - d2->type_rank; + r = r ?: strcmp(d1->sort_name, d2->sort_name); + r = r ?: strcmp(d1->own_name, d2->own_name); if (r) return r; - return strcmp(d1->own_name, d2->own_name); + if (d1->disambig_hash != d2->disambig_hash) + return d1->disambig_hash < d2->disambig_hash ? -1 : 1; + + return d1->index - d2->index; } static struct sort_datum *sort_btf_c(const struct btf *btf) @@ -617,6 +687,7 @@ static struct sort_datum *sort_btf_c(const struct btf *btf) d->type_rank = btf_type_rank(btf, i, false); d->sort_name = btf_type_sort_name(btf, i, false); d->own_name = btf__name_by_offset(btf, t->name_off); + d->disambig_hash = btf_type_disambig_hash(btf, i, true); } qsort(datums, n, sizeof(struct sort_datum), btf_type_compare); diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index c754a428c8c6..4dbc4fcdf473 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -196,7 +196,7 @@ static void probe_unprivileged_disabled(void) { long res; - /* No support for C-style ouptut */ + /* No support for C-style output */ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled"); if (json_output) { @@ -225,7 +225,7 @@ static void probe_jit_enable(void) { long res; - /* No support for C-style ouptut */ + /* No support for C-style output */ res = read_procfs("/proc/sys/net/core/bpf_jit_enable"); if (json_output) { @@ -255,7 +255,7 @@ static void probe_jit_harden(void) { long res; - /* No support for C-style ouptut */ + /* No support for C-style output */ res = read_procfs("/proc/sys/net/core/bpf_jit_harden"); if (json_output) { @@ -285,7 +285,7 @@ static void probe_jit_kallsyms(void) { long res; - /* No support for C-style ouptut */ + /* No support for C-style output */ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms"); if (json_output) { @@ -311,7 +311,7 @@ static void probe_jit_limit(void) { long res; - /* No support for C-style ouptut */ + /* No support for C-style output */ res = read_procfs("/proc/sys/net/core/bpf_jit_limit"); if (json_output) { diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c index 968714b4c3d4..d2242d9f8441 100644 --- a/tools/bpf/bpftool/net.c +++ b/tools/bpf/bpftool/net.c @@ -67,6 +67,8 @@ enum net_attach_type { 
NET_ATTACH_TYPE_XDP_GENERIC, NET_ATTACH_TYPE_XDP_DRIVER, NET_ATTACH_TYPE_XDP_OFFLOAD, + NET_ATTACH_TYPE_TCX_INGRESS, + NET_ATTACH_TYPE_TCX_EGRESS, }; static const char * const attach_type_strings[] = { @@ -74,6 +76,8 @@ static const char * const attach_type_strings[] = { [NET_ATTACH_TYPE_XDP_GENERIC] = "xdpgeneric", [NET_ATTACH_TYPE_XDP_DRIVER] = "xdpdrv", [NET_ATTACH_TYPE_XDP_OFFLOAD] = "xdpoffload", + [NET_ATTACH_TYPE_TCX_INGRESS] = "tcx_ingress", + [NET_ATTACH_TYPE_TCX_EGRESS] = "tcx_egress", }; static const char * const attach_loc_strings[] = { @@ -482,9 +486,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev, if (prog_flags[i] || json_output) { NET_START_ARRAY("prog_flags", "%s "); for (j = 0; prog_flags[i] && j < 32; j++) { - if (!(prog_flags[i] & (1 << j))) + if (!(prog_flags[i] & (1U << j))) continue; - NET_DUMP_UINT_ONLY(1 << j); + NET_DUMP_UINT_ONLY(1U << j); } NET_END_ARRAY(""); } @@ -493,9 +497,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev, if (link_flags[i] || json_output) { NET_START_ARRAY("link_flags", "%s "); for (j = 0; link_flags[i] && j < 32; j++) { - if (!(link_flags[i] & (1 << j))) + if (!(link_flags[i] & (1U << j))) continue; - NET_DUMP_UINT_ONLY(1 << j); + NET_DUMP_UINT_ONLY(1U << j); } NET_END_ARRAY(""); } @@ -647,6 +651,32 @@ static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type, return bpf_xdp_attach(ifindex, progfd, flags, NULL); } +static int get_tcx_type(enum net_attach_type attach_type) +{ + switch (attach_type) { + case NET_ATTACH_TYPE_TCX_INGRESS: + return BPF_TCX_INGRESS; + case NET_ATTACH_TYPE_TCX_EGRESS: + return BPF_TCX_EGRESS; + default: + return -1; + } +} + +static int do_attach_tcx(int progfd, enum net_attach_type attach_type, int ifindex) +{ + int type = get_tcx_type(attach_type); + + return bpf_prog_attach(progfd, ifindex, type, 0); +} + +static int do_detach_tcx(int targetfd, enum net_attach_type attach_type) +{ + int type = get_tcx_type(attach_type); + + return bpf_prog_detach(targetfd, type); +} + static int do_attach(int argc, char **argv) { enum net_attach_type attach_type; @@ -684,10 +714,23 @@ static int do_attach(int argc, char **argv) } } + switch (attach_type) { /* attach xdp prog */ - if (is_prefix("xdp", attach_type_strings[attach_type])) - err = do_attach_detach_xdp(progfd, attach_type, ifindex, - overwrite); + case NET_ATTACH_TYPE_XDP: + case NET_ATTACH_TYPE_XDP_GENERIC: + case NET_ATTACH_TYPE_XDP_DRIVER: + case NET_ATTACH_TYPE_XDP_OFFLOAD: + err = do_attach_detach_xdp(progfd, attach_type, ifindex, overwrite); + break; + /* attach tcx prog */ + case NET_ATTACH_TYPE_TCX_INGRESS: + case NET_ATTACH_TYPE_TCX_EGRESS: + err = do_attach_tcx(progfd, attach_type, ifindex); + break; + default: + break; + } + if (err) { p_err("interface %s attach failed: %s", attach_type_strings[attach_type], strerror(-err)); @@ -721,10 +764,23 @@ static int do_detach(int argc, char **argv) if (ifindex < 1) return -EINVAL; + switch (attach_type) { /* detach xdp prog */ - progfd = -1; - if (is_prefix("xdp", attach_type_strings[attach_type])) + case NET_ATTACH_TYPE_XDP: + case NET_ATTACH_TYPE_XDP_GENERIC: + case NET_ATTACH_TYPE_XDP_DRIVER: + case NET_ATTACH_TYPE_XDP_OFFLOAD: + progfd = -1; err = do_attach_detach_xdp(progfd, attach_type, ifindex, NULL); + break; + /* detach tcx prog */ + case NET_ATTACH_TYPE_TCX_INGRESS: + case NET_ATTACH_TYPE_TCX_EGRESS: + err = do_detach_tcx(ifindex, attach_type); + break; + default: + break; + } if (err < 0) { p_err("interface %s detach failed: %s", @@ -824,6 
+880,9 @@ static void show_link_netfilter(void) nf_link_count++; } + if (!nf_link_info) + return; + qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar); for (id = 0; id < nf_link_count; id++) { @@ -928,7 +987,8 @@ static int do_help(int argc, char **argv) " %1$s %2$s help\n" "\n" " " HELP_SPEC_PROGRAM "\n" - " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n" + " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload | tcx_ingress\n" + " | tcx_egress }\n" " " HELP_SPEC_OPTIONS " }\n" "\n" "Note: Only xdp, tcx, tc, netkit, flow_dissector and netfilter attachments\n" diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c index 567f56dfd9f1..d0094345fb2b 100644 --- a/tools/bpf/bpftool/xlated_dumper.c +++ b/tools/bpf/bpftool/xlated_dumper.c @@ -349,7 +349,7 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len, double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); - printf("% 4d: ", i); + printf("%4u: ", i); print_bpf_insn(&cbs, insn + i, true); if (opcodes) { @@ -415,7 +415,7 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end, } } - printf("%d: ", insn_off); + printf("%u: ", insn_off); print_bpf_insn(&cbs, cur, true); if (opcodes) { diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index d8288936c912..c4f1f1735af6 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS) CFLAGS += $(EXTRA_CFLAGS) LDFLAGS += $(EXTRA_LDFLAGS) +LDLIBS += -lelf -lz # Try to detect best kernel BTF source KERNEL_REL := $(shell uname -r) @@ -51,7 +52,7 @@ clean: libbpf_hdrs: $(BPFOBJ) $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) - $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e05b39e39c3f..1fb3cb2636e6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -7513,4 +7513,13 @@ struct bpf_iter_num { __u64 __opaque[1]; } __attribute__((aligned(8))); +/* + * Flags to control BPF kfunc behaviour. + * - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective + * helper documentation for details.) + */ +enum bpf_kfunc_flags { + BPF_F_PAD_ZEROS = (1ULL << 0), +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 972e17ec0c09..a4a7b1ad1b63 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -100,7 +100,7 @@ struct bpf_prog_load_opts { __u32 log_level; __u32 log_size; char *log_buf; - /* output: actual total log contents size (including termintaing zero). + /* output: actual total log contents size (including terminating zero). * It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). * If kernel doesn't support this feature, log_size is left unchanged. @@ -129,7 +129,7 @@ struct bpf_btf_load_opts { char *log_buf; __u32 log_level; __u32 log_size; - /* output: actual total log contents size (including termintaing zero). + /* output: actual total log contents size (including terminating zero). 
* It could be both larger than original log_size (if log was * truncated), or smaller (if log buffer wasn't filled completely). * If kernel doesn't support this feature, log_size is left unchanged. diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 305c62817dd3..80bc0242e8dc 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -341,7 +341,7 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym; * I.e., it looks almost like high-level for each loop in other languages, * supports continue/break, and is verifiable by BPF verifier. * - * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) + * For iterating integers, the difference between bpf_for_each(num, i, N, M) * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int * *`, not just `int`. So for integers bpf_for() is more convenient. diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 9314fa95f04e..a8f6cd4841b0 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -163,7 +163,7 @@ struct pt_regs___s390 { unsigned long orig_gpr2; -}; +} __attribute__((preserve_access_index)); /* s390 provides user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x)) @@ -179,7 +179,7 @@ struct pt_regs___s390 { #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG gprs[7] -#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) +#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___s390 *)(x))->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) \ BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG) @@ -222,7 +222,7 @@ struct pt_regs___s390 { struct pt_regs___arm64 { unsigned long orig_x0; -}; +} __attribute__((preserve_access_index)); /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) @@ -241,7 +241,7 @@ struct pt_regs___arm64 { #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG -#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x) +#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___arm64 *)(x))->__PT_PARM1_SYSCALL_REG) #define PT_REGS_PARM1_CORE_SYSCALL(x) \ BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG) @@ -351,6 +351,10 @@ struct pt_regs___arm64 { * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions */ +struct pt_regs___riscv { + unsigned long orig_a0; +} __attribute__((preserve_access_index)); + /* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) #define __PT_PARM1_REG a0 @@ -362,12 +366,15 @@ struct pt_regs___arm64 { #define __PT_PARM7_REG a6 #define __PT_PARM8_REG a7 -#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG +#define __PT_PARM1_SYSCALL_REG orig_a0 #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG #define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG #define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG #define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG +#define PT_REGS_PARM1_SYSCALL(x) (((const struct pt_regs___riscv *)(x))->__PT_PARM1_SYSCALL_REG) +#define 
PT_REGS_PARM1_CORE_SYSCALL(x) \ + BPF_CORE_READ((const struct pt_regs___riscv *)(x), __PT_PARM1_SYSCALL_REG) #define __PT_RET_REG ra #define __PT_FP_REG s0 @@ -473,7 +480,7 @@ struct pt_regs; #endif /* * Similarly, syscall-specific conventions might differ between function call - * conventions within each architecutre. All supported architectures pass + * conventions within each architecture. All supported architectures pass * either 6 or 7 syscall arguments in registers. * * See syscall(2) manpage for succinct table with information on each arch. @@ -515,7 +522,7 @@ struct pt_regs; #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#elif defined(bpf_target_sparc) +#elif defined(bpf_target_sparc) || defined(bpf_target_arm64) #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP @@ -651,7 +658,7 @@ struct pt_regs; * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and * similar kinds of BPF programs, that accept input arguments as a single * pointer to untyped u64 array, where each u64 can actually be a typed - * pointer or integer of different size. Instead of requring user to write + * pointer or integer of different size. Instead of requiring user to write * manual casts and work with array elements by index, BPF_PROG macro * allows user to declare a list of named and typed input arguments in the * same syntax as for normal C function. All the casting is hidden and @@ -801,7 +808,7 @@ struct pt_regs; * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific * low-level way of getting kprobe input arguments from struct pt_regs, and * provides a familiar typed and named function arguments syntax and - * semantics of accessing kprobe input paremeters. + * semantics of accessing kprobe input parameters. * * Original struct pt_regs* context is preserved as 'ctx' argument. This might * be necessary when using BPF helpers like bpf_perf_event_output(). diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 32c00db3b91b..3c131039c523 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -996,6 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; + btf->swapped_endian = base_btf->swapped_endian; } /* +1 for empty string at offset 0 */ @@ -4191,7 +4192,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id * and canonical graphs are not compatible structurally, whole graphs are * incompatible. If types are structurally equivalent (i.e., all information * except referenced type IDs is exactly the same), a mapping from `canon_id` to - * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`). + * a `cand_id` is recoded in hypothetical mapping (`btf_dedup->hypot_map`). * If a type references other types, then those referenced types are checked * for equivalence recursively. * @@ -4229,7 +4230,7 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id * consists of portions of the graph that come from multiple compilation units. * This is due to the fact that types within single compilation unit are always * deduplicated and FWDs are already resolved, if referenced struct/union - * definiton is available. So, if we had unresolved FWD and found corresponding + * definition is available. 
So, if we had unresolved FWD and found corresponding * STRUCT/UNION, they will be from different compilation units. This * consequently means that when we "link" FWD to corresponding STRUCT/UNION, * type graph will likely have at least two different BTF types that describe @@ -5394,6 +5395,9 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, new_base = btf__new_empty(); if (!new_base) return libbpf_err(-ENOMEM); + + btf__set_endianness(new_base, btf__endianness(src_btf)); + dist.id_map = calloc(n, sizeof(*dist.id_map)); if (!dist.id_map) { err = -ENOMEM; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index b68d216837a9..4e349ad79ee6 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -286,7 +286,7 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d); LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); struct btf_dump_emit_type_decl_opts { - /* size of this struct, for forward/backward compatiblity */ + /* size of this struct, for forward/backward compatibility */ size_t sz; /* optional field name for type declaration, e.g.: * - struct my_struct <FNAME> diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 894860111ddb..0a7327541c17 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -304,7 +304,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id) * definition, in which case they have to be declared inline as part of field * type declaration; or as a top-level anonymous enum, typically used for * declaring global constants. It's impossible to distinguish between two - * without knowning whether given enum type was referenced from other type: + * without knowing whether given enum type was referenced from other type: * top-level anonymous enum won't be referenced by anything, while embedded * one will. */ diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index 17f8b32f94a0..4f7399d85eab 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2024, Oracle and/or its affiliates. */ #ifndef _GNU_SOURCE diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index c92e02394159..b5ab1cb13e5e 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -28,6 +28,9 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd) int fd, ret; Elf *elf; + elf_fd->elf = NULL; + elf_fd->fd = -1; + if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("elf: failed to init libelf for %s\n", binary_path); return -LIBBPF_ERRNO__LIBELF; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a3be6f8fac09..219facd0e66e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -496,8 +496,6 @@ struct bpf_program { }; struct bpf_struct_ops { - const char *tname; - const struct btf_type *type; struct bpf_program **progs; __u32 *kern_func_off; /* e.g. 
struct tcp_congestion_ops in bpf_prog's btf format */ @@ -988,7 +986,7 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw, { const struct btf_type *kern_type, *kern_vtype; const struct btf_member *kern_data_member; - struct btf *btf; + struct btf *btf = NULL; __s32 kern_vtype_id, kern_type_id; char tname[256]; __u32 i; @@ -1083,11 +1081,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj) continue; for (j = 0; j < obj->nr_maps; ++j) { + const struct btf_type *type; + map = &obj->maps[j]; if (!bpf_map__is_struct_ops(map)) continue; - vlen = btf_vlen(map->st_ops->type); + type = btf__type_by_id(obj->btf, map->st_ops->type_id); + vlen = btf_vlen(type); for (k = 0; k < vlen; ++k) { slot_prog = map->st_ops->progs[k]; if (prog != slot_prog) @@ -1115,14 +1116,14 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map) const struct btf *btf = obj->btf; struct bpf_struct_ops *st_ops; const struct btf *kern_btf; - struct module_btf *mod_btf; + struct module_btf *mod_btf = NULL; void *data, *kern_data; const char *tname; int err; st_ops = map->st_ops; - type = st_ops->type; - tname = st_ops->tname; + type = btf__type_by_id(btf, st_ops->type_id); + tname = btf__name_by_offset(btf, type->name_off); err = find_struct_ops_kern_types(obj, tname, &mod_btf, &kern_type, &kern_type_id, &kern_vtype, &kern_vtype_id, @@ -1423,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, memcpy(st_ops->data, data->d_buf + vsi->offset, type->size); - st_ops->tname = tname; - st_ops->type = type; st_ops->type_id = type_id; pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", @@ -1849,7 +1848,7 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name) snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, sfx_len, real_name); - /* sanitise map name to characters allowed by kernel */ + /* sanities map name to characters allowed by kernel */ for (p = map_name; *p && p < map_name + sizeof(map_name); p++) if (!isalnum(*p) && *p != '_' && *p != '.') *p = '_'; @@ -7906,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object } static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const char *obj_name, const struct bpf_object_open_opts *opts) { - const char *obj_name, *kconfig, *btf_tmp_path, *token_path; + const char *kconfig, *btf_tmp_path, *token_path; struct bpf_object *obj; - char tmp_name[64]; int err; char *log_buf; size_t log_size; __u32 log_level; + if (obj_buf && !obj_name) + return ERR_PTR(-EINVAL); + if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", path ? 
: "(mem buf)"); @@ -7925,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, if (!OPTS_VALID(opts, bpf_object_open_opts)) return ERR_PTR(-EINVAL); - obj_name = OPTS_GET(opts, object_name, NULL); + obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; if (obj_buf) { - if (!obj_name) { - snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", - (unsigned long)obj_buf, - (unsigned long)obj_buf_sz); - obj_name = tmp_name; - } path = obj_name; pr_debug("loading object '%s' from buffer\n", obj_name); + } else { + pr_debug("loading object from %s\n", path); } log_buf = OPTS_GET(opts, kernel_log_buf, NULL); @@ -8018,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) if (!path) return libbpf_err_ptr(-EINVAL); - pr_debug("loading %s\n", path); - - return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); + return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); } struct bpf_object *bpf_object__open(const char *path) @@ -8032,10 +8028,15 @@ struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts) { + char tmp_name[64]; + if (!obj_buf || obj_buf_sz == 0) return libbpf_err_ptr(-EINVAL); - return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); + /* create a (quite useless) default "name" for this memory buffer object */ + snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); + + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); } static int bpf_object_unload(struct bpf_object *obj) @@ -8445,11 +8446,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, static void bpf_map_prepare_vdata(const struct bpf_map *map) { + const struct btf_type *type; struct bpf_struct_ops *st_ops; __u32 i; st_ops = map->st_ops; - for (i = 0; i < btf_vlen(st_ops->type); i++) { + type = btf__type_by_id(map->obj->btf, st_ops->type_id); + for (i = 0; i < btf_vlen(type); i++) { struct bpf_program *prog = st_ops->progs[i]; void *kern_data; int prog_fd; @@ -9056,6 +9059,11 @@ unsigned int bpf_object__kversion(const struct bpf_object *obj) return obj ? obj->kern_version : 0; } +int bpf_object__token_fd(const struct bpf_object *obj) +{ + return obj->token_fd ?: -1; +} + struct btf *bpf_object__btf(const struct bpf_object *obj) { return obj ? 
obj->btf : NULL; @@ -9712,6 +9720,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { + const struct btf_type *type; const struct btf_member *member; struct bpf_struct_ops *st_ops; struct bpf_program *prog; @@ -9771,13 +9780,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, } insn_idx = sym->st_value / BPF_INSN_SZ; - member = find_member_by_offset(st_ops->type, moff * 8); + type = btf__type_by_id(btf, st_ops->type_id); + member = find_member_by_offset(type, moff * 8); if (!member) { pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", map->name, moff); return -EINVAL; } - member_idx = member - btf_members(st_ops->type); + member_idx = member - btf_members(type); name = btf__name_by_offset(btf, member->name_off); if (!resolve_func_ptr(btf, member->type, NULL)) { @@ -11683,7 +11693,7 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru ret = 0; break; case 3: - opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0; + opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi"); *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); ret = libbpf_get_error(*link); break; @@ -13758,29 +13768,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj, int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts) { - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts, - .object_name = s->name, - ); struct bpf_object *obj; int err; - /* Attempt to preserve opts->object_name, unless overriden by user - * explicitly. Overwriting object name for skeletons is discouraged, - * as it breaks global data maps, because they contain object name - * prefix as their own map name prefix. When skeleton is generated, - * bpftool is making an assumption that this name will stay the same. - */ - if (opts) { - memcpy(&skel_opts, opts, sizeof(*opts)); - if (!opts->object_name) - skel_opts.object_name = s->name; - } - - obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); - err = libbpf_get_error(obj); - if (err) { - pr_warn("failed to initialize skeleton BPF object '%s': %d\n", - s->name, err); + obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); return libbpf_err(err); } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 64a6a3d323e3..91484303849c 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -152,7 +152,7 @@ struct bpf_object_open_opts { * log_buf and log_level settings. * * If specified, this log buffer will be passed for: - * - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden + * - each BPF progral load (BPF_PROG_LOAD) attempt, unless overridden * with bpf_program__set_log() on per-program level, to get * BPF verifier log output. * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get @@ -294,6 +294,14 @@ LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version); +/** + * @brief **bpf_object__token_fd** is an accessor for BPF token FD associated + * with BPF object. 
+ * @param obj Pointer to a valid BPF object + * @return BPF token FD or -1, if it wasn't set + */ +LIBBPF_API int bpf_object__token_fd(const struct bpf_object *obj); + struct btf; LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj); LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); @@ -455,7 +463,7 @@ LIBBPF_API int bpf_link__destroy(struct bpf_link *link); /** * @brief **bpf_program__attach()** is a generic function for attaching * a BPF program based on auto-detection of program type, attach type, - * and extra paremeters, where applicable. + * and extra parameters, where applicable. * * @param prog BPF program to attach * @return Reference to the newly created BPF link; or NULL is returned on error, @@ -679,7 +687,7 @@ struct bpf_uprobe_opts { /** * @brief **bpf_program__attach_uprobe()** attaches a BPF program * to the userspace function which is found by binary path and - * offset. You can optionally specify a particular proccess to attach + * offset. You can optionally specify a particular process to attach * to. You can also optionally attach the program to the function * exit instead of entry. * @@ -1593,11 +1601,11 @@ LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_i * memory region of the ring buffer. * This ring buffer can be used to implement a custom events consumer. * The ring buffer starts with the *struct perf_event_mmap_page*, which - * holds the ring buffer managment fields, when accessing the header + * holds the ring buffer management fields, when accessing the header * structure it's important to be SMP aware. * You can refer to *perf_event_read_simple* for a simple example. * @param pb the perf buffer structure - * @param buf_idx the buffer index to retreive + * @param buf_idx the buffer index to retrieve * @param buf (out) gets the base pointer of the mmap()'ed memory * @param buf_size (out) gets the size of the mmap()'ed region * @return 0 on success, negative error code for failure diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8f0d9ea3b1b4..0096e483f7eb 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -423,6 +423,7 @@ LIBBPF_1.5.0 { btf__relocate; bpf_map__autoattach; bpf_map__set_autoattach; + bpf_object__token_fd; bpf_program__attach_sockmap; ring__consume_n; ring_buffer__consume_n; diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index 1e1be467bede..60b2600be88a 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -76,7 +76,7 @@ enum libbpf_strict_mode { * first BPF program or map creation operation. This is done only if * kernel is too old to support memcg-based memory accounting for BPF * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY, - * but it can be overriden with libbpf_set_memlock_rlim() API. + * but it can be overridden with libbpf_set_memlock_rlim() API. * Note that libbpf_set_memlock_rlim() needs to be called before * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load() * operation. @@ -97,7 +97,7 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode); * @brief **libbpf_get_error()** extracts the error code from the passed * pointer * @param ptr pointer returned from libbpf API function - * @return error code; or 0 if no error occured + * @return error code; or 0 if no error occurred * * Note, as of libbpf 1.0 this function is not necessary and not recommended * to be used. 
Libbpf doesn't return error code embedded into the pointer diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 9cd3d4109788..e0005c6ade88 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -1413,7 +1413,7 @@ recur: return true; case BTF_KIND_PTR: /* just validate overall shape of the referenced type, so no - * contents comparison for struct/union, and allowd fwd vs + * contents comparison for struct/union, and allowed fwd vs * struct/union */ exact = false; @@ -1962,7 +1962,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, /* If existing symbol is a strong resolved symbol, bail out, * because we lost resolution battle have nothing to - * contribute. We already checked abover that there is no + * contribute. We already checked above that there is no * strong-strong conflict. We also already tightened binding * and visibility, so nothing else to contribute at that point. */ diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 1e82ab06c3eb..0875452521e9 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -107,7 +107,7 @@ static inline void skel_free(const void *p) * The loader program will perform probe_read_kernel() from maps.rodata.initial_value. * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree - * is not nessary. + * is not necessary. * * For user space: * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly. diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h index 76359bcdc94a..b811f754939f 100644 --- a/tools/lib/bpf/usdt.bpf.h +++ b/tools/lib/bpf/usdt.bpf.h @@ -39,7 +39,7 @@ enum __bpf_usdt_arg_type { struct __bpf_usdt_arg_spec { /* u64 scalar interpreted depending on arg_type, see below */ __u64 val_off; - /* arg location case, see bpf_udst_arg() for details */ + /* arg location case, see bpf_usdt_arg() for details */ enum __bpf_usdt_arg_type arg_type; /* offset of referenced register within struct pt_regs */ short reg_off; diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 5025401323af..e6533b3400de 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -8,8 +8,8 @@ test_lru_map test_lpm_map test_tag FEATURE-DUMP.libbpf +FEATURE-DUMP.selftests fixdep -test_dev_cgroup /test_progs /test_progs-no_alu32 /test_progs-bpf_gcc @@ -20,9 +20,6 @@ test_sock urandom_read test_sockmap test_lirc_mode2_user -get_cgroup_id_user -test_skb_cgroup_id_user -test_cgroup_storage test_flow_dissector flow_dissector_load test_tcpnotify_user @@ -31,6 +28,7 @@ test_tcp_check_syncookie_user test_sysctl xdping test_cpp +*.d *.subskel.h *.skel.h *.lskel.h diff --git a/tools/testing/selftests/bpf/DENYLIST.riscv64 b/tools/testing/selftests/bpf/DENYLIST.riscv64 new file mode 100644 index 000000000000..4fc4dfdde293 --- /dev/null +++ b/tools/testing/selftests/bpf/DENYLIST.riscv64 @@ -0,0 +1,3 @@ +# riscv64 deny list for BPF CI and local vmtest +exceptions # JIT does not support exceptions +tailcalls/tailcall_bpf2bpf* # JIT does not support mixing bpf2bpf and tailcalls diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 81d4757ecd4c..f04af11df8eb 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -33,6 +33,13 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) 
LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null) LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + CFLAGS += -g $(OPT_FLAGS) -rdynamic \ -Wall -Werror -fno-omit-frame-pointer \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ @@ -41,6 +48,11 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic \ LDFLAGS += $(SAN_LDFLAGS) LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread +PCAP_CFLAGS := $(shell $(PKG_CONFIG) --cflags libpcap 2>/dev/null && echo "-DTRAFFIC_MONITOR=1") +PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null) +LDLIBS += $(PCAP_LIBS) +CFLAGS += $(PCAP_CFLAGS) + # The following tests perform type punning and they may break strict # aliasing rules, which are exploited by both GCC and clang by default # while optimizing. This can lead to broken programs. @@ -54,6 +66,10 @@ progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing progs/timer_crash.c-CFLAGS := -fno-strict-aliasing progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing +progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing + +# Some utility functions use LLVM libraries +jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS) ifneq ($(LLVM),) # Silence some warnings when compiled with clang @@ -67,9 +83,7 @@ endif # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_dev_cgroup \ - test_sock test_sockmap get_cgroup_id_user \ - test_cgroup_storage \ + test_sock test_sockmap \ test_tcpnotify_user test_sysctl \ test_progs-no_alu32 TEST_INST_SUBDIRS := no_alu32 @@ -115,7 +129,6 @@ TEST_PROGS := test_kmod.sh \ test_xdp_redirect.sh \ test_xdp_redirect_multi.sh \ test_xdp_meta.sh \ - test_xdp_veth.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ test_lirc_mode2.sh \ @@ -140,7 +153,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \ test_xdp_vlan.sh test_bpftool.py # Compile but not part of 'make run_tests' -TEST_GEN_PROGS_EXTENDED = test_skb_cgroup_id_user \ +TEST_GEN_PROGS_EXTENDED = \ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \ @@ -166,6 +179,35 @@ endef include ../lib.mk +NON_CHECK_FEAT_TARGETS := clean docs-clean +CHECK_FEAT := $(filter-out $(NON_CHECK_FEAT_TARGETS),$(or $(MAKECMDGOALS), "none")) +ifneq ($(CHECK_FEAT),) +FEATURE_USER := .selftests +FEATURE_TESTS := llvm +FEATURE_DISPLAY := $(FEATURE_TESTS) + +# Makefile.feature expects OUTPUT to end with a slash +ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1) +$(let OUTPUT,$(OUTPUT)/,\ + $(eval include ../../../build/Makefile.feature)) +else +OUTPUT := $(OUTPUT)/ +$(eval include ../../../build/Makefile.feature) +OUTPUT := $(patsubst %/,%,$(OUTPUT)) +endif +endif + +ifeq ($(feature-llvm),1) + LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT + LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets + # both llvm-config and lib.mk add -D_GNU_SOURCE, which ends up as conflict + LLVM_CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags)) + LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS)) + LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS)) + LLVM_LDLIBS += 
-lstdc++ + LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags) +endif + SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build INCLUDE_DIR := $(SCRATCH_DIR)/include @@ -293,13 +335,9 @@ JSON_WRITER := $(OUTPUT)/json_writer.o CAP_HELPERS := $(OUTPUT)/cap_helpers.o NETWORK_HELPERS := $(OUTPUT)/network_helpers.o -$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS) -$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) -$(OUTPUT)/get_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) -$(OUTPUT)/test_cgroup_storage: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_tag: $(TESTING_HELPERS) @@ -365,10 +403,14 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers endif +# vmlinux.h is first dumped to a temprorary file and then compared to +# the previous version. This helps to avoid unnecessary re-builds of +# $(TRUNNER_BPF_OBJS) $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR) ifeq ($(VMLINUX_H),) $(call msg,GEN,,$@) - $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@ + $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $(INCLUDE_DIR)/.vmlinux.h.tmp + $(Q)cmp -s $(INCLUDE_DIR)/.vmlinux.h.tmp $@ || mv $(INCLUDE_DIR)/.vmlinux.h.tmp $@ else $(call msg,CP,,$@) $(Q)cp "$(VMLINUX_H)" $@ @@ -396,7 +438,8 @@ define get_sys_includes $(shell $(1) $(2) -v -E - </dev/null 2>&1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ $(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}') \ -$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}') +$(shell $(1) $(2) -dM -E - </dev/null | grep '__loongarch_grlen ' | awk '{printf("-D__BITS_PER_LONG=%d", $$3)}') \ +$(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG) |_MIPS_SIM |_ABI(O32|N32|64) ' | awk '{printf("-D%s=%s ", $$2, $$3)}') endef # Determine target endianness. 
@@ -427,23 +470,24 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h # $1 - input .c file # $2 - output .o file # $3 - CFLAGS +# $4 - binary name define CLANG_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 define CLANG_NOALU32_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4 define CLANG_CPUV4_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2 endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE - $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2) + $(call msg,GCC-BPF,$4,$2) $(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2 endef @@ -477,7 +521,14 @@ xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o xdp_hw_metadata.skel.h-deps := xdp_hw_metadata.bpf.o xdp_features.skel.h-deps := xdp_features.bpf.o -LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps))) +LINKED_BPF_OBJS := $(foreach skel,$(LINKED_SKELS),$($(skel)-deps)) +LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(LINKED_BPF_OBJS)) + +HEADERS_FOR_BPF_OBJS := $(wildcard $(BPFDIR)/*.bpf.h) \ + $(addprefix $(BPFDIR)/, bpf_core_read.h \ + bpf_endian.h \ + bpf_helpers.h \ + bpf_tracing.h) # Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. @@ -529,13 +580,12 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \ $(TRUNNER_BPF_PROGS_DIR)/%.c \ $(TRUNNER_BPF_PROGS_DIR)/*.h \ $$(INCLUDE_DIR)/vmlinux.h \ - $(wildcard $(BPFDIR)/bpf_*.h) \ - $(wildcard $(BPFDIR)/*.bpf.h) \ + $(HEADERS_FOR_BPF_OBJS) \ | $(TRUNNER_OUTPUT) $$(BPFOBJ) $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ $(TRUNNER_BPF_CFLAGS) \ $$($$<-CFLAGS) \ - $$($$<-$2-CFLAGS)) + $$($$<-$2-CFLAGS),$(TRUNNER_BINARY)) $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -556,7 +606,11 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o) -$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) +$(LINKED_BPF_OBJS): %: $(TRUNNER_OUTPUT)/% + +# .SECONDEXPANSION here allows to correctly expand %-deps variables as prerequisites +.SECONDEXPANSION: +$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_OUTPUT)/%: $$$$(%-deps) $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o)) $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps)) $(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked1.o) @@ -566,6 +620,14 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@ $(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h) $(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o) + +# When the compiler generates a %.d file, only skel basenames (not +# full paths) are specified as 
prerequisites for corresponding %.o +# file. This target makes %.skel.h basename dependent on full paths, +# linking generated %.d dependency with actual %.skel.h files. +$(notdir %.skel.h): $(TRUNNER_OUTPUT)/%.skel.h + @true + endif # ensure we set up tests.h header generation rule just once @@ -583,14 +645,25 @@ endif # Note: we cd into output directory to ensure embedded BPF object is found $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \ $(TRUNNER_TESTS_DIR)/%.c \ - $(TRUNNER_EXTRA_HDRS) \ - $(TRUNNER_BPF_OBJS) \ - $(TRUNNER_BPF_SKELS) \ - $(TRUNNER_BPF_LSKELS) \ - $(TRUNNER_BPF_SKELS_LINKED) \ - $$(BPFOBJ) | $(TRUNNER_OUTPUT) + | $(TRUNNER_OUTPUT)/%.test.d $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@) - $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F) + $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -MMD -MT $$@ -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F) + +$(TRUNNER_TEST_OBJS:.o=.d): $(TRUNNER_OUTPUT)/%.test.d: \ + $(TRUNNER_TESTS_DIR)/%.c \ + $(TRUNNER_EXTRA_HDRS) \ + $(TRUNNER_BPF_SKELS) \ + $(TRUNNER_BPF_LSKELS) \ + $(TRUNNER_BPF_SKELS_LINKED) \ + $$(BPFOBJ) | $(TRUNNER_OUTPUT) + +ifeq ($(filter clean docs-clean,$(MAKECMDGOALS)),) +include $(wildcard $(TRUNNER_TEST_OBJS:.o=.d)) +endif + +# add per extra obj CFGLAGS definitions +$(foreach N,$(patsubst $(TRUNNER_OUTPUT)/%.o,%,$(TRUNNER_EXTRA_OBJS)), \ + $(eval $(TRUNNER_OUTPUT)/$(N).o: CFLAGS += $($(N).c-CFLAGS))) $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \ %.c \ @@ -608,13 +681,19 @@ ifneq ($2:$(OUTPUT),:$(shell pwd)) $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/ endif +$(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS) +$(OUTPUT)/$(TRUNNER_BINARY): LDFLAGS += $$(LLVM_LDFLAGS) + +# some X.test.o files have runtime dependencies on Y.bpf.o files +$(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS) + $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ $(RESOLVE_BTFIDS) \ $(TRUNNER_BPFTOOL) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) - $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ + $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LDFLAGS) -o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \ $(OUTPUT)/$(if $2,$2/)bpftool @@ -633,9 +712,11 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \ cap_helpers.c \ unpriv_helpers.c \ netlink_helpers.c \ + jit_disasm_helpers.c \ test_loader.c \ xsk.c \ disasm.c \ + disasm_helpers.c \ json_writer.c \ flow_dissector_load.h \ ip_check_defrag_frags.h @@ -762,17 +843,21 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ -$(OUTPUT)/uprobe_multi: uprobe_multi.c +# Linking uprobe_multi can fail due to relocation overflows on mips. 
+$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot) +$(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld $(call msg,BINARY,,$@) - $(Q)$(CC) $(CFLAGS) -O0 $(LDFLAGS) $^ $(LDLIBS) -o $@ + $(Q)$(CC) $(CFLAGS) -Wl,-T,uprobe_multi.ld -O0 $(LDFLAGS) \ + $(filter-out %.ld,$^) $(LDLIBS) -o $@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ - feature bpftool \ - $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \ + feature bpftool \ + $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \ no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ bpf_test_no_cfi.ko \ - liburandom_read.so) + liburandom_read.so) \ + $(OUTPUT)/FEATURE-DUMP.selftests .PHONY: docs docs-clean diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index 9b974e425af3..776fbe3cb8f9 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -85,7 +85,37 @@ In case of linker errors when running selftests, try using static linking: If you want to change pahole and llvm, you can change `PATH` environment variable in the beginning of script. -.. note:: The script currently only supports x86_64 and s390x architectures. +Running vmtest on RV64 +====================== +To speed up testing and avoid various dependency issues, it is recommended to +run vmtest in a Docker container. Before running vmtest, we need to prepare +Docker container and local rootfs image. The overall steps are as follows: + +1. Create Docker container as shown in link [0]. + +2. Use mkrootfs_debian.sh script [1] to build local rootfs image: + +.. code-block:: console + + $ sudo ./mkrootfs_debian.sh --arch riscv64 --distro noble + +3. Start Docker container [0] and run vmtest in the container: + +.. code-block:: console + + $ PLATFORM=riscv64 CROSS_COMPILE=riscv64-linux-gnu- \ + tools/testing/selftests/bpf/vmtest.sh \ + -l <path of local rootfs image> -- \ + ./test_progs -d \ + \"$(cat tools/testing/selftests/bpf/DENYLIST.riscv64 \ + | cut -d'#' -f1 \ + | sed -e 's/^[[:space:]]*//' \ + -e 's/[[:space:]]*$//' \ + | tr -s '\n' ',' \ + )\" + +Link: https://github.com/pulehui/riscv-bpf-vmtest.git [0] +Link: https://github.com/libbpf/ci/blob/main/rootfs/mkrootfs_debian.sh [1] Additional information about selftest failures are documented here. 
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 627b74ae041b..1bd403a5ef7b 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -10,6 +10,7 @@ #include <sys/sysinfo.h> #include <signal.h> #include "bench.h" +#include "bpf_util.h" #include "testing_helpers.h" struct env env = { @@ -519,6 +520,12 @@ extern const struct bench bench_trig_uprobe_push; extern const struct bench bench_trig_uretprobe_push; extern const struct bench bench_trig_uprobe_ret; extern const struct bench bench_trig_uretprobe_ret; +extern const struct bench bench_trig_uprobe_multi_nop; +extern const struct bench bench_trig_uretprobe_multi_nop; +extern const struct bench bench_trig_uprobe_multi_push; +extern const struct bench bench_trig_uretprobe_multi_push; +extern const struct bench bench_trig_uprobe_multi_ret; +extern const struct bench bench_trig_uretprobe_multi_ret; extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; @@ -573,6 +580,12 @@ static const struct bench *benchs[] = { &bench_trig_uretprobe_push, &bench_trig_uprobe_ret, &bench_trig_uretprobe_ret, + &bench_trig_uprobe_multi_nop, + &bench_trig_uretprobe_multi_nop, + &bench_trig_uprobe_multi_push, + &bench_trig_uretprobe_multi_push, + &bench_trig_uprobe_multi_ret, + &bench_trig_uretprobe_multi_ret, /* ringbuf/perfbuf benchmarks */ &bench_rb_libbpf, &bench_rb_custom, diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 68180d8f8558..005c401b3e22 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -10,6 +10,7 @@ #include <math.h> #include <time.h> #include <sys/syscall.h> +#include <limits.h> struct cpu_set { bool *cpus; diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index 4b05539f167d..2ed0ef6f21ee 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -276,7 +276,7 @@ static void trigger_rawtp_setup(void) * instructions. So use two different targets, one of which starts with nop * and another doesn't. * - * GCC doesn't generate stack setup preample for these functions due to them + * GCC doesn't generate stack setup preamble for these functions due to them * having no input arguments and doing nothing in the body. 
*/ __nocf_check __weak void uprobe_target_nop(void) @@ -332,7 +332,7 @@ static void *uprobe_producer_ret(void *input) return NULL; } -static void usetup(bool use_retprobe, void *target_addr) +static void usetup(bool use_retprobe, bool use_multi, void *target_addr) { size_t uprobe_offset; struct bpf_link *link; @@ -346,7 +346,10 @@ static void usetup(bool use_retprobe, void *target_addr) exit(1); } - bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true); + if (use_multi) + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe_multi, true); + else + bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true); err = trigger_bench__load(ctx.skel); if (err) { @@ -355,16 +358,28 @@ static void usetup(bool use_retprobe, void *target_addr) } uprobe_offset = get_uprobe_offset(target_addr); - link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe, - use_retprobe, - -1 /* all PIDs */, - "/proc/self/exe", - uprobe_offset); + if (use_multi) { + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, + .retprobe = use_retprobe, + .cnt = 1, + .offsets = &uprobe_offset, + ); + link = bpf_program__attach_uprobe_multi( + ctx.skel->progs.bench_trigger_uprobe_multi, + -1 /* all PIDs */, "/proc/self/exe", NULL, &opts); + ctx.skel->links.bench_trigger_uprobe_multi = link; + } else { + link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe, + use_retprobe, + -1 /* all PIDs */, + "/proc/self/exe", + uprobe_offset); + ctx.skel->links.bench_trigger_uprobe = link; + } if (!link) { - fprintf(stderr, "failed to attach uprobe!\n"); + fprintf(stderr, "failed to attach %s!\n", use_multi ? "multi-uprobe" : "uprobe"); exit(1); } - ctx.skel->links.bench_trigger_uprobe = link; } static void usermode_count_setup(void) @@ -374,32 +389,62 @@ static void usermode_count_setup(void) static void uprobe_nop_setup(void) { - usetup(false, &uprobe_target_nop); + usetup(false, false /* !use_multi */, &uprobe_target_nop); } static void uretprobe_nop_setup(void) { - usetup(true, &uprobe_target_nop); + usetup(true, false /* !use_multi */, &uprobe_target_nop); } static void uprobe_push_setup(void) { - usetup(false, &uprobe_target_push); + usetup(false, false /* !use_multi */, &uprobe_target_push); } static void uretprobe_push_setup(void) { - usetup(true, &uprobe_target_push); + usetup(true, false /* !use_multi */, &uprobe_target_push); } static void uprobe_ret_setup(void) { - usetup(false, &uprobe_target_ret); + usetup(false, false /* !use_multi */, &uprobe_target_ret); } static void uretprobe_ret_setup(void) { - usetup(true, &uprobe_target_ret); + usetup(true, false /* !use_multi */, &uprobe_target_ret); +} + +static void uprobe_multi_nop_setup(void) +{ + usetup(false, true /* use_multi */, &uprobe_target_nop); +} + +static void uretprobe_multi_nop_setup(void) +{ + usetup(true, true /* use_multi */, &uprobe_target_nop); +} + +static void uprobe_multi_push_setup(void) +{ + usetup(false, true /* use_multi */, &uprobe_target_push); +} + +static void uretprobe_multi_push_setup(void) +{ + usetup(true, true /* use_multi */, &uprobe_target_push); +} + +static void uprobe_multi_ret_setup(void) +{ + usetup(false, true /* use_multi */, &uprobe_target_ret); +} + +static void uretprobe_multi_ret_setup(void) +{ + usetup(true, true /* use_multi */, &uprobe_target_ret); } const struct bench bench_trig_syscall_count = { @@ -454,3 +499,9 @@ BENCH_TRIG_USERMODE(uprobe_ret, ret, "uprobe-ret"); BENCH_TRIG_USERMODE(uretprobe_nop, nop, "uretprobe-nop"); BENCH_TRIG_USERMODE(uretprobe_push, push, 
"uretprobe-push"); BENCH_TRIG_USERMODE(uretprobe_ret, ret, "uretprobe-ret"); +BENCH_TRIG_USERMODE(uprobe_multi_nop, nop, "uprobe-multi-nop"); +BENCH_TRIG_USERMODE(uprobe_multi_push, push, "uprobe-multi-push"); +BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret"); +BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop"); +BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push"); +BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret"); diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 828556cdc2f0..b0668f29f7b3 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -195,6 +195,32 @@ extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; */ extern void bpf_throw(u64 cookie) __ksym; +/* Description + * Acquire a reference on the exe_file member field belonging to the + * mm_struct that is nested within the supplied task_struct. The supplied + * task_struct must be trusted/referenced. + * Returns + * A referenced file pointer pointing to the exe_file member field of the + * mm_struct nested in the supplied task_struct, or NULL. + */ +extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym; + +/* Description + * Release a reference on the supplied file. The supplied file must be + * acquired. + */ +extern void bpf_put_file(struct file *file) __ksym; + +/* Description + * Resolve a pathname for the supplied path and store it in the supplied + * buffer. The supplied path must be trusted/referenced. + * Returns + * A positive integer corresponding to the length of the resolved pathname, + * including the NULL termination character, stored in the supplied + * buffer. On error, a negative integer is returned. + */ +extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym; + /* This macro must be used to mark the exception callback corresponding to the * main program. For example: * diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index 3b6675ab4086..2eb3483f2fb0 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -45,7 +45,7 @@ extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clo /* Description * Modify the address of a AF_UNIX sockaddr. - * Returns__bpf_kfunc + * Returns * -EINVAL if the address size is too big or, 0 if the sockaddr was successfully modified. 
*/ extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, @@ -78,4 +78,13 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, extern bool bpf_session_is_return(void) __ksym __weak; extern __u64 *bpf_session_cookie(void) __ksym __weak; + +struct dentry; +/* Description + * Returns xattr of a dentry + * Returns + * Error code + */ +extern int bpf_get_dentry_xattr(struct dentry *dentry, const char *name, + struct bpf_dynptr *value_ptr) __ksym __weak; #endif diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 22807fd78fe6..8835761d9a12 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -17,6 +17,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <linux/un.h> +#include <linux/filter.h> #include <net/sock.h> #include <linux/namei.h> #include "bpf_testmod.h" @@ -141,13 +142,12 @@ bpf_testmod_test_mod_kfunc(int i) __bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) { - if (cnt < 0) { - it->cnt = 0; + it->cnt = cnt; + + if (cnt < 0) return -EINVAL; - } it->value = value; - it->cnt = cnt; return 0; } @@ -162,6 +162,14 @@ __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it) return &it->value; } +__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq* it__iter) +{ + if (it__iter->cnt < 0) + return 0; + + return val + it__iter->value; +} + __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) { it->cnt = 0; @@ -176,6 +184,36 @@ __bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, { } +__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr) +{ + return NULL; +} + +__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) +{ + return NULL; +} + +__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr) +{ +} + +__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr) +{ +} + __bpf_kfunc struct bpf_testmod_ctx * bpf_testmod_ctx_create(int *err) { @@ -534,8 +572,16 @@ BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value) BTF_ID_FLAGS(func, bpf_kfunc_common_test) BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) +BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE) +BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE) +BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU) BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) @@ -923,6 +969,51 @@ out: return err; } +static DEFINE_MUTEX(st_ops_mutex); +static struct bpf_testmod_st_ops 
*st_ops; + +__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_prologue) + ret = st_ops->test_prologue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_epilogue) + ret = st_ops->test_epilogue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) +{ + int ret = -1; + + mutex_lock(&st_ops_mutex); + if (st_ops && st_ops->test_pro_epilogue) + ret = st_ops->test_pro_epilogue(args); + mutex_unlock(&st_ops_mutex); + + return ret; +} + +__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) +{ + args->a += 10; + return args->a; +} + BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) @@ -959,6 +1050,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS) BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) static int bpf_testmod_ops_init(struct btf *btf) @@ -1027,6 +1122,11 @@ static void bpf_testmod_test_2(int a, int b) { } +static int bpf_testmod_tramp(int value) +{ + return 0; +} + static int bpf_testmod_ops__test_maybe_null(int dummy, struct task_struct *task__nullable) { @@ -1073,6 +1173,144 @@ struct bpf_struct_ops bpf_testmod_ops2 = { .owner = THIS_MODULE, }; +static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args) +{ + return 0; +} + +static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args) +{ + return 0; +} + +static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args) +{ + return 0; +} + +static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + struct bpf_insn *insn = insn_buf; + + if (strcmp(prog->aux->attach_func_name, "test_prologue") && + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) + return 0; + + /* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx". 
+ * r7 = r6->a; + * r7 += 1000; + * r6->a = r7; + */ + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); + *insn++ = prog->insnsi[0]; + + return insn - insn_buf; +} + +static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog, + s16 ctx_stack_off) +{ + struct bpf_insn *insn = insn_buf; + + if (strcmp(prog->aux->attach_func_name, "test_epilogue") && + strcmp(prog->aux->attach_func_name, "test_pro_epilogue")) + return 0; + + /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx" + * r1 = r1[0]; // r1 will be "struct st_ops *args" + * r6 = r1->a; + * r6 += 10000; + * r1->a = r6; + * r0 = r6; + * r0 *= 2; + * BPF_EXIT; + */ + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); + *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a)); + *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000); + *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); + *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); + *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); + *insn++ = BPF_EXIT_INSN(); + + return insn - insn_buf; +} + +static int st_ops_btf_struct_access(struct bpf_verifier_log *log, + const struct bpf_reg_state *reg, + int off, int size) +{ + if (off < 0 || off + size > sizeof(struct st_ops_args)) + return -EACCES; + return 0; +} + +static const struct bpf_verifier_ops st_ops_verifier_ops = { + .is_valid_access = bpf_testmod_ops_is_valid_access, + .btf_struct_access = st_ops_btf_struct_access, + .gen_prologue = st_ops_gen_prologue, + .gen_epilogue = st_ops_gen_epilogue, + .get_func_proto = bpf_base_func_proto, +}; + +static struct bpf_testmod_st_ops st_ops_cfi_stubs = { + .test_prologue = bpf_test_mod_st_ops__test_prologue, + .test_epilogue = bpf_test_mod_st_ops__test_epilogue, + .test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue, +}; + +static int st_ops_reg(void *kdata, struct bpf_link *link) +{ + int err = 0; + + mutex_lock(&st_ops_mutex); + if (st_ops) { + pr_err("st_ops has already been registered\n"); + err = -EEXIST; + goto unlock; + } + st_ops = kdata; + +unlock: + mutex_unlock(&st_ops_mutex); + return err; +} + +static void st_ops_unreg(void *kdata, struct bpf_link *link) +{ + mutex_lock(&st_ops_mutex); + st_ops = NULL; + mutex_unlock(&st_ops_mutex); +} + +static int st_ops_init(struct btf *btf) +{ + return 0; +} + +static int st_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} + +static struct bpf_struct_ops testmod_st_ops = { + .verifier_ops = &st_ops_verifier_ops, + .init = st_ops_init, + .init_member = st_ops_init_member, + .reg = st_ops_reg, + .unreg = st_ops_unreg, + .cfi_stubs = &st_ops_cfi_stubs, + .name = "bpf_testmod_st_ops", + .owner = THIS_MODULE, +}; + extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) @@ -1083,14 +1321,17 @@ static int bpf_testmod_init(void) .kfunc_btf_id = bpf_testmod_dtor_ids[1] }, }; + void **tramp; int ret; ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); ret = 
ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); + ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, ARRAY_SIZE(bpf_testmod_dtors), THIS_MODULE); @@ -1106,6 +1347,14 @@ static int bpf_testmod_init(void) ret = register_bpf_testmod_uprobe(); if (ret < 0) return ret; + + /* Ensure nothing is between tramp_1..tramp_40 */ + BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) != + offsetofend(struct bpf_testmod_ops, tramp_40)); + tramp = (void **)&__bpf_testmod_ops.tramp_1; + while (tramp <= (void **)&__bpf_testmod_ops.tramp_40) + *tramp++ = bpf_testmod_tramp; + return 0; } diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index 23fa1872ee67..fb7dff47597a 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -35,6 +35,7 @@ struct bpf_testmod_ops { void (*test_2)(int a, int b); /* Used to test nullable arguments. */ int (*test_maybe_null)(int dummy, struct task_struct *task); + int (*unsupported_ops)(void); /* The following fields are used to test shadow copies. */ char onebyte; @@ -93,4 +94,15 @@ struct bpf_testmod_ops2 { int (*test_1)(void); }; +struct st_ops_args { + u64 a; +}; + +struct bpf_testmod_st_ops { + int (*test_prologue)(struct st_ops_args *args); + int (*test_epilogue)(struct st_ops_args *args); + int (*test_pro_epilogue)(struct st_ops_args *args); + struct module *owner; +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h index e587a79f2239..b58817938deb 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -144,4 +144,19 @@ void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nulla struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym; void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym; +struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr) __ksym; +struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym; +void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym; + +struct st_ops_args; +int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym; +int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym; + +void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr) __ksym; +void bpf_kfunc_trusted_task_test(struct task_struct *ptr) __ksym; +void bpf_kfunc_trusted_num_test(int *ptr) __ksym; +void bpf_kfunc_rcu_task_test(struct task_struct *ptr) __ksym; + #endif /* _BPF_TESTMOD_KFUNC_H */ diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 23bb9a9e6a7d..e4535451322e 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -644,7 +644,7 @@ unsigned long long 
get_classid_cgroup_id(void) /** * get_cgroup1_hierarchy_id - Retrieves the ID of a cgroup1 hierarchy from the cgroup1 subsys name. * @subsys_name: The cgroup1 subsys name, which can be retrieved from /proc/self/cgroup. It can be - * a named cgroup like "name=systemd", a controller name like "net_cls", or multi-contollers like + * a named cgroup like "name=systemd", a controller name like "net_cls", or multi-controllers like * "net_cls,net_prio". */ int get_cgroup1_hierarchy_id(const char *subsys_name) diff --git a/tools/testing/selftests/bpf/config.riscv64 b/tools/testing/selftests/bpf/config.riscv64 new file mode 100644 index 000000000000..bb7043a80e1a --- /dev/null +++ b/tools/testing/selftests/bpf/config.riscv64 @@ -0,0 +1,84 @@ +CONFIG_AUDIT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BONDING=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_PRELOAD=y +CONFIG_BPF_PRELOAD_UMD=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_SCHED=y +CONFIG_CPUSETS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_FS=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_EXPERT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_FRAME_POINTER=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HUGETLBFS=y +CONFIG_INET=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_JUMP_LABEL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KPROBES=y +CONFIG_MEMCG=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NET_ACT_BPF=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_NET_VRF=y +CONFIG_NONPORTABLE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=256 +CONFIG_PACKET=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_PCI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_PRINTK_TIME=y +CONFIG_PROC_KCORE=y +CONFIG_PROFILING=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_RISCV_ISA_C=y +CONFIG_RISCV_PMU=y +CONFIG_RISCV_PMU_SBI=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SMP=y +CONFIG_SOC_VIRT=y +CONFIG_SYSVIPC=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TLS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TUN=y +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USER_NS=y +CONFIG_VETH=y +CONFIG_VLAN_8021Q=y +CONFIG_VSOCKETS_LOOPBACK=y +CONFIG_XFRM_USER=y diff --git a/tools/testing/selftests/bpf/disasm_helpers.c b/tools/testing/selftests/bpf/disasm_helpers.c new file mode 100644 index 000000000000..f529f1c8c171 --- /dev/null +++ b/tools/testing/selftests/bpf/disasm_helpers.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include <bpf/bpf.h> +#include "disasm.h" + +struct print_insn_context { + char scratch[16]; + char *buf; + size_t sz; +}; + +static void print_insn_cb(void *private_data, const char *fmt, ...) 
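/* printf-style callback handed to the kernel's print_bpf_insn() through
 * struct bpf_insn_cbs below; it simply renders the formatted text into
 * ctx->buf.
 */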
+{ + struct print_insn_context *ctx = private_data; + va_list args; + + va_start(args, fmt); + vsnprintf(ctx->buf, ctx->sz, fmt, args); + va_end(args); +} + +static const char *print_call_cb(void *private_data, const struct bpf_insn *insn) +{ + struct print_insn_context *ctx = private_data; + + /* For pseudo calls verifier.c:jit_subprogs() hides original + * imm to insn->off and changes insn->imm to be an index of + * the subprog instead. + */ + if (insn->src_reg == BPF_PSEUDO_CALL) { + snprintf(ctx->scratch, sizeof(ctx->scratch), "%+d", insn->off); + return ctx->scratch; + } + + return NULL; +} + +struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz) +{ + struct print_insn_context ctx = { + .buf = buf, + .sz = buf_sz, + }; + struct bpf_insn_cbs cbs = { + .cb_print = print_insn_cb, + .cb_call = print_call_cb, + .private_data = &ctx, + }; + char *tmp, *pfx_end, *sfx_start; + bool double_insn; + int len; + + print_bpf_insn(&cbs, insn, true); + /* We share code with kernel BPF disassembler, it adds '(FF) ' prefix + * for each instruction (FF stands for instruction `code` byte). + * Remove the prefix inplace, and also simplify call instructions. + * E.g.: "(85) call foo#10" -> "call foo". + * Also remove newline in the end (the 'max(strlen(buf) - 1, 0)' thing). + */ + pfx_end = buf + 5; + sfx_start = buf + max((int)strlen(buf) - 1, 0); + if (strncmp(pfx_end, "call ", 5) == 0 && (tmp = strrchr(buf, '#'))) + sfx_start = tmp; + len = sfx_start - pfx_end; + memmove(buf, pfx_end, len); + buf[len] = 0; + double_insn = insn->code == (BPF_LD | BPF_IMM | BPF_DW); + return insn + (double_insn ? 2 : 1); +} diff --git a/tools/testing/selftests/bpf/disasm_helpers.h b/tools/testing/selftests/bpf/disasm_helpers.h new file mode 100644 index 000000000000..7b26cab70099 --- /dev/null +++ b/tools/testing/selftests/bpf/disasm_helpers.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +#ifndef __DISASM_HELPERS_H +#define __DISASM_HELPERS_H + +#include <stdlib.h> + +struct bpf_insn; + +struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz); + +#endif /* __DISASM_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c deleted file mode 100644 index aefd83ebdcd7..000000000000 --- a/tools/testing/selftests/bpf/get_cgroup_id_user.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2018 Facebook - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <errno.h> -#include <fcntl.h> -#include <syscall.h> -#include <unistd.h> -#include <linux/perf_event.h> -#include <sys/ioctl.h> -#include <sys/time.h> -#include <sys/types.h> -#include <sys/stat.h> - -#include <linux/bpf.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -#include "cgroup_helpers.h" -#include "testing_helpers.h" - -#define CHECK(condition, tag, format...) 
({ \ - int __ret = !!(condition); \ - if (__ret) { \ - printf("%s:FAIL:%s ", __func__, tag); \ - printf(format); \ - } else { \ - printf("%s:PASS:%s\n", __func__, tag); \ - } \ - __ret; \ -}) - -static int bpf_find_map(const char *test, struct bpf_object *obj, - const char *name) -{ - struct bpf_map *map; - - map = bpf_object__find_map_by_name(obj, name); - if (!map) - return -1; - return bpf_map__fd(map); -} - -#define TEST_CGROUP "/test-bpf-get-cgroup-id/" - -int main(int argc, char **argv) -{ - const char *probe_name = "syscalls/sys_enter_nanosleep"; - const char *file = "get_cgroup_id_kern.bpf.o"; - int err, bytes, efd, prog_fd, pmu_fd; - int cgroup_fd, cgidmap_fd, pidmap_fd; - struct perf_event_attr attr = {}; - struct bpf_object *obj; - __u64 kcgid = 0, ucgid; - __u32 key = 0, pid; - int exit_code = 1; - char buf[256]; - const struct timespec req = { - .tv_sec = 1, - .tv_nsec = 0, - }; - - cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); - if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno)) - return 1; - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) - goto cleanup_cgroup_env; - - cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids"); - if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n", - cgidmap_fd, errno)) - goto close_prog; - - pidmap_fd = bpf_find_map(__func__, obj, "pidmap"); - if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n", - pidmap_fd, errno)) - goto close_prog; - - pid = getpid(); - bpf_map_update_elem(pidmap_fd, &key, &pid, 0); - - if (access("/sys/kernel/tracing/trace", F_OK) == 0) { - snprintf(buf, sizeof(buf), - "/sys/kernel/tracing/events/%s/id", probe_name); - } else { - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/%s/id", probe_name); - } - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) - goto close_prog; - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", - "bytes %d errno %d\n", bytes, errno)) - goto close_prog; - - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = 1; - attr.wakeup_events = 1; - - /* attach to this pid so the all bpf invocations will be in the - * cgroup associated with this pid. 
- */ - pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, - errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, - errno)) - goto close_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, - errno)) - goto close_pmu; - - /* trigger some syscalls */ - syscall(__NR_nanosleep, &req, NULL); - - err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid); - if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno)) - goto close_pmu; - - ucgid = get_cgroup_id(TEST_CGROUP); - if (CHECK(kcgid != ucgid, "compare_cgroup_id", - "kern cgid %llx user cgid %llx", kcgid, ucgid)) - goto close_pmu; - - exit_code = 0; - printf("%s:PASS\n", argv[0]); - -close_pmu: - close(pmu_fd); -close_prog: - bpf_object__close(obj); -cleanup_cgroup_env: - cleanup_cgroup_environment(); - return exit_code; -} diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.c b/tools/testing/selftests/bpf/jit_disasm_helpers.c new file mode 100644 index 000000000000..febd6b12e372 --- /dev/null +++ b/tools/testing/selftests/bpf/jit_disasm_helpers.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include <test_progs.h> + +#ifdef HAVE_LLVM_SUPPORT + +#include <llvm-c/Core.h> +#include <llvm-c/Disassembler.h> +#include <llvm-c/Target.h> +#include <llvm-c/TargetMachine.h> + +/* The intent is to use get_jited_program_text() for small test + * programs written in BPF assembly, thus assume that 32 local labels + * would be sufficient. + */ +#define MAX_LOCAL_LABELS 32 + +/* Local labels are encoded as 'L42', this requires 4 bytes of storage: + * 3 characters + zero byte + */ +#define LOCAL_LABEL_LEN 4 + +static bool llvm_initialized; + +struct local_labels { + bool print_phase; + __u32 prog_len; + __u32 cnt; + __u32 pcs[MAX_LOCAL_LABELS]; + char names[MAX_LOCAL_LABELS][LOCAL_LABEL_LEN]; +}; + +static const char *lookup_symbol(void *data, uint64_t ref_value, uint64_t *ref_type, + uint64_t ref_pc, const char **ref_name) +{ + struct local_labels *labels = data; + uint64_t type = *ref_type; + int i; + + *ref_type = LLVMDisassembler_ReferenceType_InOut_None; + *ref_name = NULL; + if (type != LLVMDisassembler_ReferenceType_In_Branch) + return NULL; + /* Depending on labels->print_phase either discover local labels or + * return a name assigned with local jump target: + * - if print_phase is true and ref_value is in labels->pcs, + * return corresponding labels->name. 
+ * - if print_phase is false, save program-local jump targets + * in labels->pcs; + */ + if (labels->print_phase) { + for (i = 0; i < labels->cnt; ++i) + if (labels->pcs[i] == ref_value) + return labels->names[i]; + } else { + if (labels->cnt < MAX_LOCAL_LABELS && ref_value < labels->prog_len) + labels->pcs[labels->cnt++] = ref_value; + } + return NULL; +} + +static int disasm_insn(LLVMDisasmContextRef ctx, uint8_t *image, __u32 len, __u32 pc, + char *buf, __u32 buf_sz) +{ + int i, cnt; + + cnt = LLVMDisasmInstruction(ctx, image + pc, len - pc, pc, + buf, buf_sz); + if (cnt > 0) + return cnt; + PRINT_FAIL("Can't disasm instruction at offset %d:", pc); + for (i = 0; i < 16 && pc + i < len; ++i) + printf(" %02x", image[pc + i]); + printf("\n"); + return -EINVAL; +} + +static int cmp_u32(const void *_a, const void *_b) +{ + __u32 a = *(__u32 *)_a; + __u32 b = *(__u32 *)_b; + + if (a < b) + return -1; + if (a > b) + return 1; + return 0; +} + +static int disasm_one_func(FILE *text_out, uint8_t *image, __u32 len) +{ + char *label, *colon, *triple = NULL; + LLVMDisasmContextRef ctx = NULL; + struct local_labels labels = {}; + __u32 *label_pc, pc; + int i, cnt, err = 0; + char buf[64]; + + triple = LLVMGetDefaultTargetTriple(); + ctx = LLVMCreateDisasm(triple, &labels, 0, NULL, lookup_symbol); + if (!ASSERT_OK_PTR(ctx, "LLVMCreateDisasm")) { + err = -EINVAL; + goto out; + } + + cnt = LLVMSetDisasmOptions(ctx, LLVMDisassembler_Option_PrintImmHex); + if (!ASSERT_EQ(cnt, 1, "LLVMSetDisasmOptions")) { + err = -EINVAL; + goto out; + } + + /* discover labels */ + labels.prog_len = len; + pc = 0; + while (pc < len) { + cnt = disasm_insn(ctx, image, len, pc, buf, 1); + if (cnt < 0) { + err = cnt; + goto out; + } + pc += cnt; + } + qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32); + for (i = 0; i < labels.cnt; ++i) + /* gcc is unable to infer upper bound for labels.cnt and assumes + * it to be U32_MAX. U32_MAX takes 10 decimal digits. + * snprintf below prints into labels.names[*], + * which has space only for two digits and a letter. + * To avoid truncation warning use (i % MAX_LOCAL_LABELS), + * which informs gcc about printed value upper bound. 
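 * (Concretely: each names[] slot is LOCAL_LABEL_LEN == 4 bytes, so the
 * widest label that fits is "L31" plus the terminating NUL; keeping the
 * printed value in 0..31 via the modulo is what lets gcc prove the
 * "L%d" format cannot be truncated.)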
+ */ + snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % MAX_LOCAL_LABELS); + + /* now print with labels */ + labels.print_phase = true; + pc = 0; + while (pc < len) { + cnt = disasm_insn(ctx, image, len, pc, buf, sizeof(buf)); + if (cnt < 0) { + err = cnt; + goto out; + } + label_pc = bsearch(&pc, labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32); + label = ""; + colon = ""; + if (label_pc) { + label = labels.names[label_pc - labels.pcs]; + colon = ":"; + } + fprintf(text_out, "%x:\t", pc); + for (i = 0; i < cnt; ++i) + fprintf(text_out, "%02x ", image[pc + i]); + for (i = cnt * 3; i < 12 * 3; ++i) + fputc(' ', text_out); + fprintf(text_out, "%s%s%s\n", label, colon, buf); + pc += cnt; + } + +out: + if (triple) + LLVMDisposeMessage(triple); + if (ctx) + LLVMDisasmDispose(ctx); + return err; +} + +int get_jited_program_text(int fd, char *text, size_t text_sz) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 jited_funcs, len, pc; + __u32 *func_lens = NULL; + FILE *text_out = NULL; + uint8_t *image = NULL; + int i, err = 0; + + if (!llvm_initialized) { + LLVMInitializeAllTargetInfos(); + LLVMInitializeAllTargetMCs(); + LLVMInitializeAllDisassemblers(); + llvm_initialized = 1; + } + + text_out = fmemopen(text, text_sz, "w"); + if (!ASSERT_OK_PTR(text_out, "open_memstream")) { + err = -errno; + goto out; + } + + /* first call is to find out jited program len */ + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #1")) + goto out; + + len = info.jited_prog_len; + image = malloc(len); + if (!ASSERT_OK_PTR(image, "malloc(info.jited_prog_len)")) { + err = -ENOMEM; + goto out; + } + + jited_funcs = info.nr_jited_func_lens; + func_lens = malloc(jited_funcs * sizeof(__u32)); + if (!ASSERT_OK_PTR(func_lens, "malloc(info.nr_jited_func_lens)")) { + err = -ENOMEM; + goto out; + } + + memset(&info, 0, sizeof(info)); + info.jited_prog_insns = (__u64)image; + info.jited_prog_len = len; + info.jited_func_lens = (__u64)func_lens; + info.nr_jited_func_lens = jited_funcs; + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #2")) + goto out; + + for (pc = 0, i = 0; i < jited_funcs; ++i) { + fprintf(text_out, "func #%d:\n", i); + disasm_one_func(text_out, image + pc, func_lens[i]); + fprintf(text_out, "\n"); + pc += func_lens[i]; + } + +out: + if (text_out) + fclose(text_out); + if (image) + free(image); + if (func_lens) + free(func_lens); + return err; +} + +#else /* HAVE_LLVM_SUPPORT */ + +int get_jited_program_text(int fd, char *text, size_t text_sz) +{ + if (env.verbosity >= VERBOSE_VERY) + printf("compiled w/o llvm development libraries, can't dis-assembly binary code"); + return -EOPNOTSUPP; +} + +#endif /* HAVE_LLVM_SUPPORT */ diff --git a/tools/testing/selftests/bpf/jit_disasm_helpers.h b/tools/testing/selftests/bpf/jit_disasm_helpers.h new file mode 100644 index 000000000000..e6924fd65ecf --- /dev/null +++ b/tools/testing/selftests/bpf/jit_disasm_helpers.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +#ifndef __JIT_DISASM_HELPERS_H +#define __JIT_DISASM_HELPERS_H + +#include <stddef.h> + +int get_jited_program_text(int fd, char *text, size_t text_sz); + +#endif /* __JIT_DISASM_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c index 1230ccf90128..5da493b94ae2 100644 --- a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c +++ 
b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c @@ -197,7 +197,7 @@ void __test_map_lookup_and_delete_batch(bool is_pcpu) CHECK(total != max_entries, "delete with steps", "total = %u, max_entries = %u\n", total, max_entries); - /* check map is empty, errono == ENOENT */ + /* check map is empty, errno == ENOENT */ err = bpf_map_get_next_key(map_fd, NULL, &key); CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()", "error: %s\n", strerror(errno)); diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c index b66d56ddb7ef..fe3e19f96244 100644 --- a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c @@ -135,7 +135,7 @@ void test_lpm_trie_map_batch_ops(void) CHECK(total != max_entries, "delete with steps", "total = %u, max_entries = %u\n", total, max_entries); - /* check map is empty, errono == ENOENT */ + /* check map is empty, errno == ENOENT */ err = bpf_map_get_next_key(map_fd, NULL, &key); CHECK(!err || errno != ENOENT, "bpf_map_get_next_key()", "error: %s\n", strerror(errno)); diff --git a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c index 2ea36408816b..1c7c04288eff 100644 --- a/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c +++ b/tools/testing/selftests/bpf/map_tests/map_percpu_stats.c @@ -17,6 +17,7 @@ #define MAX_ENTRIES_HASH_OF_MAPS 64 #define N_THREADS 8 #define MAX_MAP_KEY_SIZE 4 +#define PCPU_MIN_UNIT_SIZE 32768 static void map_info(int map_fd, struct bpf_map_info *info) { @@ -456,6 +457,22 @@ static void map_percpu_stats_hash_of_maps(void) printf("test_%s:PASS\n", __func__); } +static void map_percpu_stats_map_value_size(void) +{ + int fd; + int value_sz = PCPU_MIN_UNIT_SIZE + 1; + struct bpf_map_create_opts opts = { .sz = sizeof(opts) }; + enum bpf_map_type map_types[] = { BPF_MAP_TYPE_PERCPU_ARRAY, + BPF_MAP_TYPE_PERCPU_HASH, + BPF_MAP_TYPE_LRU_PERCPU_HASH }; + for (int i = 0; i < ARRAY_SIZE(map_types); i++) { + fd = bpf_map_create(map_types[i], NULL, sizeof(__u32), value_sz, 1, &opts); + CHECK(fd < 0 && errno != E2BIG, "percpu map value size", + "error: %s\n", strerror(errno)); + } + printf("test_%s:PASS\n", __func__); +} + void test_map_percpu_stats(void) { map_percpu_stats_hash(); @@ -467,4 +484,5 @@ void test_map_percpu_stats(void) map_percpu_stats_percpu_lru_hash(); map_percpu_stats_percpu_lru_hash_no_common(); map_percpu_stats_hash_of_maps(); + map_percpu_stats_map_value_size(); } diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c index 18405c3b7cee..af10c309359a 100644 --- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c @@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void) rlim_new.rlim_max = rlim_new.rlim_cur + 128; err = setrlimit(RLIMIT_NOFILE, &rlim_new); CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d", - rlim_new.rlim_cur, errno); + (unsigned long) rlim_new.rlim_cur, errno); } err = do_sk_storage_map_stress_free(); diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index e0cba4178e41..27784946b01b 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -11,17 +11,31 @@ #include <arpa/inet.h> #include <sys/mount.h> #include 
<sys/stat.h> +#include <sys/types.h> #include <sys/un.h> +#include <sys/eventfd.h> #include <linux/err.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/limits.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <netinet/tcp.h> +#include <net/if.h> + #include "bpf_util.h" #include "network_helpers.h" #include "test_progs.h" +#ifdef TRAFFIC_MONITOR +/* Prevent pcap.h from including pcap/bpf.h and causing conflicts */ +#define PCAP_DONT_INCLUDE_PCAP_BPF_H 1 +#include <pcap/pcap.h> +#include <pcap/dlt.h> +#endif + #ifndef IPPROTO_MPTCP #define IPPROTO_MPTCP 262 #endif @@ -80,12 +94,15 @@ int settimeo(int fd, int timeout_ms) #define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; }) -static int __start_server(int type, const struct sockaddr *addr, socklen_t addrlen, - const struct network_helper_opts *opts) +int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen, + const struct network_helper_opts *opts) { int fd; - fd = socket(addr->sa_family, type, opts->proto); + if (!opts) + opts = &default_opts; + + fd = socket(addr->ss_family, type, opts->proto); if (fd < 0) { log_err("Failed to create server socket"); return -1; @@ -100,7 +117,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl goto error_close; } - if (bind(fd, addr, addrlen) < 0) { + if (bind(fd, (struct sockaddr *)addr, addrlen) < 0) { log_err("Failed to bind socket"); goto error_close; } @@ -131,7 +148,7 @@ int start_server_str(int family, int type, const char *addr_str, __u16 port, if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) return -1; - return __start_server(type, (struct sockaddr *)&addr, addrlen, opts); + return start_server_addr(type, &addr, addrlen, opts); } int start_server(int family, int type, const char *addr_str, __u16 port, @@ -173,7 +190,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str, if (!fds) return NULL; - fds[0] = __start_server(type, (struct sockaddr *)&addr, addrlen, &opts); + fds[0] = start_server_addr(type, &addr, addrlen, &opts); if (fds[0] == -1) goto close_fds; nr_fds = 1; @@ -182,7 +199,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str, goto close_fds; for (; nr_fds < nr_listens; nr_fds++) { - fds[nr_fds] = __start_server(type, (struct sockaddr *)&addr, addrlen, &opts); + fds[nr_fds] = start_server_addr(type, &addr, addrlen, &opts); if (fds[nr_fds] == -1) goto close_fds; } @@ -194,15 +211,6 @@ close_fds: return NULL; } -int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len, - const struct network_helper_opts *opts) -{ - if (!opts) - opts = &default_opts; - - return __start_server(type, (struct sockaddr *)addr, len, opts); -} - void free_fds(int *fds, unsigned int nr_close_fds) { if (fds) { @@ -277,33 +285,6 @@ error_close: return -1; } -static int connect_fd_to_addr(int fd, - const struct sockaddr_storage *addr, - socklen_t addrlen, const bool must_fail) -{ - int ret; - - errno = 0; - ret = connect(fd, (const struct sockaddr *)addr, addrlen); - if (must_fail) { - if (!ret) { - log_err("Unexpected success to connect to server"); - return -1; - } - if (errno != EPERM) { - log_err("Unexpected error from connect to server"); - return -1; - } - } else { - if (ret) { - log_err("Failed to connect to server"); - return -1; - } - } - - return 0; -} - int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t addrlen, const struct network_helper_opts *opts) { @@ -318,17 +299,17 @@ int 
connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add return -1; } - if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) - goto error_close; + if (connect(fd, (const struct sockaddr *)addr, addrlen)) { + log_err("Failed to connect to server"); + save_errno_close(fd); + return -1; + } return fd; - -error_close: - save_errno_close(fd); - return -1; } -int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts) +int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port, + const struct network_helper_opts *opts) { struct sockaddr_storage addr; socklen_t addrlen; @@ -336,6 +317,27 @@ int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts if (!opts) opts = &default_opts; + if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) + return -1; + + return connect_to_addr(type, &addr, addrlen, opts); +} + +int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) +{ + struct sockaddr_storage addr; + socklen_t addrlen, optlen; + int type; + + if (!opts) + opts = &default_opts; + + optlen = sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); + return -1; + } + addrlen = sizeof(addr); if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { log_err("Failed to get server addr"); @@ -350,14 +352,8 @@ int connect_to_fd(int server_fd, int timeout_ms) struct network_helper_opts opts = { .timeout_ms = timeout_ms, }; - int type, protocol; socklen_t optlen; - - optlen = sizeof(type); - if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { - log_err("getsockopt(SOL_TYPE)"); - return -1; - } + int protocol; optlen = sizeof(protocol); if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { @@ -366,7 +362,7 @@ int connect_to_fd(int server_fd, int timeout_ms) } opts.proto = protocol; - return connect_to_fd_opts(server_fd, type, &opts); + return connect_to_fd_opts(server_fd, &opts); } int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) @@ -382,8 +378,10 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) return -1; } - if (connect_fd_to_addr(client_fd, &addr, len, false)) + if (connect(client_fd, (const struct sockaddr *)&addr, len)) { + log_err("Failed to connect to server"); return -1; + } return 0; } @@ -448,6 +446,52 @@ char *ping_command(int family) return "ping"; } +int remove_netns(const char *name) +{ + char *cmd; + int r; + + r = asprintf(&cmd, "ip netns del %s >/dev/null 2>&1", name); + if (r < 0) { + log_err("Failed to malloc cmd"); + return -1; + } + + r = system(cmd); + free(cmd); + return r; +} + +int make_netns(const char *name) +{ + char *cmd; + int r; + + r = asprintf(&cmd, "ip netns add %s", name); + if (r < 0) { + log_err("Failed to malloc cmd"); + return -1; + } + + r = system(cmd); + free(cmd); + + if (r) + return r; + + r = asprintf(&cmd, "ip -n %s link set lo up", name); + if (r < 0) { + log_err("Failed to malloc cmd for setting up lo"); + remove_netns(name); + return -1; + } + + r = system(cmd); + free(cmd); + + return r; +} + struct nstoken { int orig_netns_fd; }; @@ -676,3 +720,443 @@ int send_recv_data(int lfd, int fd, uint32_t total_bytes) return err; } + +#ifdef TRAFFIC_MONITOR +struct tmonitor_ctx { + pcap_t *pcap; + pcap_dumper_t *dumper; + pthread_t thread; + int wake_fd; + + volatile bool done; + char pkt_fname[PATH_MAX]; + int pcap_fd; +}; + +/* Is this packet captured with a Ethernet protocol type? 
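 * (The check below reads the 16-bit ARPHRD_ hardware type that the
 * LINKTYPE_LINUX_SLL2 pseudo-header stores in network byte order at
 * bytes 8-9; for the few hardware types listed in the switch, the
 * leading protocol-type field does not carry an Ethernet protocol type.)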
*/ +static bool is_ethernet(const u_char *packet) +{ + u16 arphdr_type; + + memcpy(&arphdr_type, packet + 8, 2); + arphdr_type = ntohs(arphdr_type); + + /* Except the following cases, the protocol type contains the + * Ethernet protocol type for the packet. + * + * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html + */ + switch (arphdr_type) { + case 770: /* ARPHRD_FRAD */ + case 778: /* ARPHDR_IPGRE */ + case 803: /* ARPHRD_IEEE80211_RADIOTAP */ + printf("Packet captured: arphdr_type=%d\n", arphdr_type); + return false; + } + return true; +} + +static const char * const pkt_types[] = { + "In", + "B", /* Broadcast */ + "M", /* Multicast */ + "C", /* Captured with the promiscuous mode */ + "Out", +}; + +static const char *pkt_type_str(u16 pkt_type) +{ + if (pkt_type < ARRAY_SIZE(pkt_types)) + return pkt_types[pkt_type]; + return "Unknown"; +} + +/* Show the information of the transport layer in the packet */ +static void show_transport(const u_char *packet, u16 len, u32 ifindex, + const char *src_addr, const char *dst_addr, + u16 proto, bool ipv6, u8 pkt_type) +{ + char *ifname, _ifname[IF_NAMESIZE]; + const char *transport_str; + u16 src_port, dst_port; + struct udphdr *udp; + struct tcphdr *tcp; + + ifname = if_indextoname(ifindex, _ifname); + if (!ifname) { + snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex); + ifname = _ifname; + } + + if (proto == IPPROTO_UDP) { + udp = (struct udphdr *)packet; + src_port = ntohs(udp->source); + dst_port = ntohs(udp->dest); + transport_str = "UDP"; + } else if (proto == IPPROTO_TCP) { + tcp = (struct tcphdr *)packet; + src_port = ntohs(tcp->source); + dst_port = ntohs(tcp->dest); + transport_str = "TCP"; + } else if (proto == IPPROTO_ICMP) { + printf("%-7s %-3s IPv4 %s > %s: ICMP, length %d, type %d, code %d\n", + ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, + packet[0], packet[1]); + return; + } else if (proto == IPPROTO_ICMPV6) { + printf("%-7s %-3s IPv6 %s > %s: ICMPv6, length %d, type %d, code %d\n", + ifname, pkt_type_str(pkt_type), src_addr, dst_addr, len, + packet[0], packet[1]); + return; + } else { + printf("%-7s %-3s %s %s > %s: protocol %d\n", + ifname, pkt_type_str(pkt_type), ipv6 ? 
"IPv6" : "IPv4", + src_addr, dst_addr, proto); + return; + } + + /* TCP or UDP*/ + + flockfile(stdout); + if (ipv6) + printf("%-7s %-3s IPv6 %s.%d > %s.%d: %s, length %d", + ifname, pkt_type_str(pkt_type), src_addr, src_port, + dst_addr, dst_port, transport_str, len); + else + printf("%-7s %-3s IPv4 %s:%d > %s:%d: %s, length %d", + ifname, pkt_type_str(pkt_type), src_addr, src_port, + dst_addr, dst_port, transport_str, len); + + if (proto == IPPROTO_TCP) { + if (tcp->fin) + printf(", FIN"); + if (tcp->syn) + printf(", SYN"); + if (tcp->rst) + printf(", RST"); + if (tcp->ack) + printf(", ACK"); + } + + printf("\n"); + funlockfile(stdout); +} + +static void show_ipv6_packet(const u_char *packet, u32 ifindex, u8 pkt_type) +{ + char src_buf[INET6_ADDRSTRLEN], dst_buf[INET6_ADDRSTRLEN]; + struct ipv6hdr *pkt = (struct ipv6hdr *)packet; + const char *src, *dst; + u_char proto; + + src = inet_ntop(AF_INET6, &pkt->saddr, src_buf, sizeof(src_buf)); + if (!src) + src = "<invalid>"; + dst = inet_ntop(AF_INET6, &pkt->daddr, dst_buf, sizeof(dst_buf)); + if (!dst) + dst = "<invalid>"; + proto = pkt->nexthdr; + show_transport(packet + sizeof(struct ipv6hdr), + ntohs(pkt->payload_len), + ifindex, src, dst, proto, true, pkt_type); +} + +static void show_ipv4_packet(const u_char *packet, u32 ifindex, u8 pkt_type) +{ + char src_buf[INET_ADDRSTRLEN], dst_buf[INET_ADDRSTRLEN]; + struct iphdr *pkt = (struct iphdr *)packet; + const char *src, *dst; + u_char proto; + + src = inet_ntop(AF_INET, &pkt->saddr, src_buf, sizeof(src_buf)); + if (!src) + src = "<invalid>"; + dst = inet_ntop(AF_INET, &pkt->daddr, dst_buf, sizeof(dst_buf)); + if (!dst) + dst = "<invalid>"; + proto = pkt->protocol; + show_transport(packet + sizeof(struct iphdr), + ntohs(pkt->tot_len), + ifindex, src, dst, proto, false, pkt_type); +} + +static void *traffic_monitor_thread(void *arg) +{ + char *ifname, _ifname[IF_NAMESIZE]; + const u_char *packet, *payload; + struct tmonitor_ctx *ctx = arg; + pcap_dumper_t *dumper = ctx->dumper; + int fd = ctx->pcap_fd, nfds, r; + int wake_fd = ctx->wake_fd; + struct pcap_pkthdr header; + pcap_t *pcap = ctx->pcap; + u32 ifindex; + fd_set fds; + u16 proto; + u8 ptype; + + nfds = (fd > wake_fd ? fd : wake_fd) + 1; + FD_ZERO(&fds); + + while (!ctx->done) { + FD_SET(fd, &fds); + FD_SET(wake_fd, &fds); + r = select(nfds, &fds, NULL, NULL, NULL); + if (!r) + continue; + if (r < 0) { + if (errno == EINTR) + continue; + log_err("Fail to select on pcap fd and wake fd"); + break; + } + + /* This instance of pcap is non-blocking */ + packet = pcap_next(pcap, &header); + if (!packet) + continue; + + /* According to the man page of pcap_dump(), first argument + * is the pcap_dumper_t pointer even it's argument type is + * u_char *. + */ + pcap_dump((u_char *)dumper, &header, packet); + + /* Not sure what other types of packets look like. Here, we + * parse only Ethernet and compatible packets. + */ + if (!is_ethernet(packet)) + continue; + + /* Skip SLL2 header + * https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html + * + * Although the document doesn't mention that, the payload + * doesn't include the Ethernet header. The payload starts + * from the first byte of the network layer header. 
+ */ + payload = packet + 20; + + memcpy(&proto, packet, 2); + proto = ntohs(proto); + memcpy(&ifindex, packet + 4, 4); + ifindex = ntohl(ifindex); + ptype = packet[10]; + + if (proto == ETH_P_IPV6) { + show_ipv6_packet(payload, ifindex, ptype); + } else if (proto == ETH_P_IP) { + show_ipv4_packet(payload, ifindex, ptype); + } else { + ifname = if_indextoname(ifindex, _ifname); + if (!ifname) { + snprintf(_ifname, sizeof(_ifname), "unknown(%d)", ifindex); + ifname = _ifname; + } + + printf("%-7s %-3s Unknown network protocol type 0x%x\n", + ifname, pkt_type_str(ptype), proto); + } + } + + return NULL; +} + +/* Prepare the pcap handle to capture packets. + * + * This pcap is non-blocking and immediate mode is enabled to receive + * captured packets as soon as possible. The snaplen is set to 1024 bytes + * to limit the size of captured content. The format of the link-layer + * header is set to DLT_LINUX_SLL2 to enable handling various link-layer + * technologies. + */ +static pcap_t *traffic_monitor_prepare_pcap(void) +{ + char errbuf[PCAP_ERRBUF_SIZE]; + pcap_t *pcap; + int r; + + /* Listen on all NICs in the namespace */ + pcap = pcap_create("any", errbuf); + if (!pcap) { + log_err("Failed to open pcap: %s", errbuf); + return NULL; + } + /* Limit the size of the packet (first N bytes) */ + r = pcap_set_snaplen(pcap, 1024); + if (r) { + log_err("Failed to set snaplen: %s", pcap_geterr(pcap)); + goto error; + } + /* To receive packets as fast as possible */ + r = pcap_set_immediate_mode(pcap, 1); + if (r) { + log_err("Failed to set immediate mode: %s", pcap_geterr(pcap)); + goto error; + } + r = pcap_setnonblock(pcap, 1, errbuf); + if (r) { + log_err("Failed to set nonblock: %s", errbuf); + goto error; + } + r = pcap_activate(pcap); + if (r) { + log_err("Failed to activate pcap: %s", pcap_geterr(pcap)); + goto error; + } + /* Determine the format of the link-layer header */ + r = pcap_set_datalink(pcap, DLT_LINUX_SLL2); + if (r) { + log_err("Failed to set datalink: %s", pcap_geterr(pcap)); + goto error; + } + + return pcap; +error: + pcap_close(pcap); + return NULL; +} + +static void encode_test_name(char *buf, size_t len, const char *test_name, const char *subtest_name) +{ + char *p; + + if (subtest_name) + snprintf(buf, len, "%s__%s", test_name, subtest_name); + else + snprintf(buf, len, "%s", test_name); + while ((p = strchr(buf, '/'))) + *p = '_'; + while ((p = strchr(buf, ' '))) + *p = '_'; +} + +#define PCAP_DIR "/tmp/tmon_pcap" + +/* Start to monitor the network traffic in the given network namespace. + * + * netns: the name of the network namespace to monitor. If NULL, the + * current network namespace is monitored. + * test_name: the name of the running test. + * subtest_name: the name of the running subtest if there is. It should be + * NULL if it is not a subtest. + * + * This function will start a thread to capture packets going through NICs + * in the give network namespace. 
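 * A typical call sequence from a test looks like this (the names are
 * illustrative, not taken from an existing test):
 *
 *	struct tmonitor_ctx *tmon;
 *
 *	tmon = traffic_monitor_start("test_ns", "my_test", "my_subtest");
 *	... exercise the network path under test ...
 *	traffic_monitor_stop(tmon);
 *
 * traffic_monitor_stop() accepts a NULL context, so the stop call is
 * safe even if starting the monitor failed.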
+ */ +struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name, + const char *subtest_name) +{ + struct nstoken *nstoken = NULL; + struct tmonitor_ctx *ctx; + char test_name_buf[64]; + static int tmon_seq; + int r; + + if (netns) { + nstoken = open_netns(netns); + if (!nstoken) + return NULL; + } + ctx = malloc(sizeof(*ctx)); + if (!ctx) { + log_err("Failed to malloc ctx"); + goto fail_ctx; + } + memset(ctx, 0, sizeof(*ctx)); + + encode_test_name(test_name_buf, sizeof(test_name_buf), test_name, subtest_name); + snprintf(ctx->pkt_fname, sizeof(ctx->pkt_fname), + PCAP_DIR "/packets-%d-%d-%s-%s.log", getpid(), tmon_seq++, + test_name_buf, netns ? netns : "unknown"); + + r = mkdir(PCAP_DIR, 0755); + if (r && errno != EEXIST) { + log_err("Failed to create " PCAP_DIR); + goto fail_pcap; + } + + ctx->pcap = traffic_monitor_prepare_pcap(); + if (!ctx->pcap) + goto fail_pcap; + ctx->pcap_fd = pcap_get_selectable_fd(ctx->pcap); + if (ctx->pcap_fd < 0) { + log_err("Failed to get pcap fd"); + goto fail_dumper; + } + + /* Create a packet file */ + ctx->dumper = pcap_dump_open(ctx->pcap, ctx->pkt_fname); + if (!ctx->dumper) { + log_err("Failed to open pcap dump: %s", ctx->pkt_fname); + goto fail_dumper; + } + + /* Create an eventfd to wake up the monitor thread */ + ctx->wake_fd = eventfd(0, 0); + if (ctx->wake_fd < 0) { + log_err("Failed to create eventfd"); + goto fail_eventfd; + } + + r = pthread_create(&ctx->thread, NULL, traffic_monitor_thread, ctx); + if (r) { + log_err("Failed to create thread"); + goto fail; + } + + close_netns(nstoken); + + return ctx; + +fail: + close(ctx->wake_fd); + +fail_eventfd: + pcap_dump_close(ctx->dumper); + unlink(ctx->pkt_fname); + +fail_dumper: + pcap_close(ctx->pcap); + +fail_pcap: + free(ctx); + +fail_ctx: + close_netns(nstoken); + + return NULL; +} + +static void traffic_monitor_release(struct tmonitor_ctx *ctx) +{ + pcap_close(ctx->pcap); + pcap_dump_close(ctx->dumper); + + close(ctx->wake_fd); + + free(ctx); +} + +/* Stop the network traffic monitor. + * + * ctx: the context returned by traffic_monitor_start() + */ +void traffic_monitor_stop(struct tmonitor_ctx *ctx) +{ + __u64 w = 1; + + if (!ctx) + return; + + /* Stop the monitor thread */ + ctx->done = true; + /* Wake up the background thread. */ + write(ctx->wake_fd, &w, sizeof(w)); + pthread_join(ctx->thread, NULL); + + printf("Packet file: %s\n", strrchr(ctx->pkt_fname, '/') + 1); + + traffic_monitor_release(ctx); +} +#endif /* TRAFFIC_MONITOR */ diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index aac5b94d6379..c72c16e1aff8 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -23,7 +23,6 @@ typedef __u16 __sum16; struct network_helper_opts { int timeout_ms; - bool must_fail; int proto; /* +ve: Passed to listen() as-is. 
* 0: Default when the test does not set @@ -70,8 +69,10 @@ int client_socket(int family, int type, const struct network_helper_opts *opts); int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); +int connect_to_addr_str(int family, int type, const char *addr_str, __u16 port, + const struct network_helper_opts *opts); int connect_to_fd(int server_fd, int timeout_ms); -int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts); +int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts); int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); int fastopen_connect(int server_fd, const char *data, unsigned int data_len, int timeout_ms); @@ -92,6 +93,8 @@ struct nstoken; struct nstoken *open_netns(const char *name); void close_netns(struct nstoken *token); int send_recv_data(int lfd, int fd, uint32_t total_bytes); +int make_netns(const char *name); +int remove_netns(const char *name); static __u16 csum_fold(__u32 csum) { @@ -135,4 +138,22 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold((__u32)s); } +struct tmonitor_ctx; + +#ifdef TRAFFIC_MONITOR +struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name, + const char *subtest_name); +void traffic_monitor_stop(struct tmonitor_ctx *ctx); +#else +static inline struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name, + const char *subtest_name) +{ + return NULL; +} + +static inline void traffic_monitor_stop(struct tmonitor_ctx *ctx) +{ +} +#endif + #endif diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 7175af39134f..329c7862b52d 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -283,9 +283,11 @@ static void test_uprobe_sleepable(struct test_attach_probe *skel) trigger_func3(); ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res"); - ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res"); - ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res"); - ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res"); + ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res"); + ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res"); + ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res"); + ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res"); + ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res"); } void test_attach_probe(void) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 618af9dfae9b..52e6f7570475 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -1218,7 +1218,7 @@ out: bpf_iter_bpf_sk_storage_helpers__destroy(skel); } -static void test_bpf_sk_stoarge_map_iter_fd(void) +static void test_bpf_sk_storage_map_iter_fd(void) { struct bpf_iter_bpf_sk_storage_map *skel; @@ -1693,7 +1693,7 @@ void test_bpf_iter(void) if (test__start_subtest("bpf_sk_storage_map")) test_bpf_sk_storage_map(); if (test__start_subtest("bpf_sk_storage_map_iter_fd")) - 
test_bpf_sk_stoarge_map_iter_fd(); + test_bpf_sk_storage_map_iter_fd(); if (test__start_subtest("bpf_sk_storage_delete")) test_bpf_sk_storage_delete(); if (test__start_subtest("bpf_sk_storage_get")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c index b52ff8ce34db..16bed9dd8e6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd) struct sockaddr_in6 addr; socklen_t addrlen = sizeof(addr); - if (!getsockname(fd, &addr, &addrlen)) + if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen)) return ntohs(addr.sin6_port); return 0; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 63422f4f3896..409a06975823 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -49,7 +49,7 @@ static bool start_test(char *addr_str, goto err; /* connect to server */ - *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts); + *cli_fd = connect_to_fd_opts(*srv_fd, cli_opts); if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts")) goto err; @@ -285,7 +285,7 @@ static void test_dctcp_fallback(void) dctcp_skel = bpf_dctcp__open(); if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) return; - strcpy(dctcp_skel->rodata->fallback, "cubic"); + strcpy(dctcp_skel->rodata->fallback_cc, "cubic"); if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load")) goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 61de88cf4ad0..e63d74ce046f 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -5020,7 +5020,7 @@ struct pprint_mapv_int128 { static struct btf_raw_test pprint_test_template[] = { { .raw_types = { - /* unsighed char */ /* [1] */ + /* unsigned char */ /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), /* unsigned short */ /* [2] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), @@ -5087,7 +5087,7 @@ static struct btf_raw_test pprint_test_template[] = { * be encoded with kind_flag set. */ .raw_types = { - /* unsighed char */ /* [1] */ + /* unsigned char */ /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), /* unsigned short */ /* [2] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), @@ -5154,7 +5154,7 @@ static struct btf_raw_test pprint_test_template[] = { * will have both int and enum types. */ .raw_types = { - /* unsighed char */ /* [1] */ + /* unsigned char */ /* [1] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), /* unsigned short */ /* [2] */ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c index bfbe795823a2..ca84726d5ac1 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -535,6 +535,72 @@ cleanup: btf__free(vmlinux_btf); } +/* Split and new base BTFs should inherit endianness from source BTF. 
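 * The test below builds a base BTF with the non-native byte order,
 * distills it into a new base/split pair, serializes both with
 * btf__raw_data(), parses them back with btf__new()/btf__new_split(),
 * and checks that the inverse endianness survived the round trip.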
*/ +static void test_distilled_endianness(void) +{ + struct btf *base = NULL, *split = NULL, *new_base = NULL, *new_split = NULL; + struct btf *new_base1 = NULL, *new_split1 = NULL; + enum btf_endianness inverse_endianness; + const void *raw_data; + __u32 size; + + base = btf__new_empty(); + if (!ASSERT_OK_PTR(base, "empty_main_btf")) + return; + inverse_endianness = btf__endianness(base) == BTF_LITTLE_ENDIAN ? BTF_BIG_ENDIAN + : BTF_LITTLE_ENDIAN; + btf__set_endianness(base, inverse_endianness); + btf__add_int(base, "int", 4, BTF_INT_SIGNED); /* [1] int */ + VALIDATE_RAW_BTF( + base, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + split = btf__new_empty_split(base); + if (!ASSERT_OK_PTR(split, "empty_split_btf")) + goto cleanup; + btf__add_ptr(split, 1); + VALIDATE_RAW_BTF( + split, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + if (!ASSERT_EQ(0, btf__distill_base(split, &new_base, &new_split), + "distilled_base") || + !ASSERT_OK_PTR(new_base, "distilled_base") || + !ASSERT_OK_PTR(new_split, "distilled_split") || + !ASSERT_EQ(2, btf__type_cnt(new_base), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + new_split, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + + raw_data = btf__raw_data(new_base, &size); + if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #1")) + goto cleanup; + new_base1 = btf__new(raw_data, size); + if (!ASSERT_OK_PTR(new_base1, "new_base1 = btf__new()")) + goto cleanup; + raw_data = btf__raw_data(new_split, &size); + if (!ASSERT_OK_PTR(raw_data, "btf__raw_data #2")) + goto cleanup; + new_split1 = btf__new_split(raw_data, size, new_base1); + if (!ASSERT_OK_PTR(new_split1, "new_split1 = btf__new()")) + goto cleanup; + + ASSERT_EQ(btf__endianness(new_base1), inverse_endianness, "new_base1 endianness"); + ASSERT_EQ(btf__endianness(new_split1), inverse_endianness, "new_split1 endianness"); + VALIDATE_RAW_BTF( + new_split1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); +cleanup: + btf__free(new_split1); + btf__free(new_base1); + btf__free(new_split); + btf__free(new_base); + btf__free(split); + btf__free(base); +} + void test_btf_distill(void) { if (test__start_subtest("distilled_base")) @@ -549,4 +615,6 @@ void test_btf_distill(void) test_distilled_base_multi_err2(); if (test__start_subtest("distilled_base_vmlinux")) test_distilled_base_vmlinux(); + if (test__start_subtest("distilled_endianness")) + test_distilled_endianness(); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 09a8e6f9b379..b293b8501fd6 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -805,8 +805,8 @@ static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT, "int cpu_number = (int)100", 100); #endif - TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT, - "static int cpu_profile_flip = (int)2", 2); + TEST_BTF_DUMP_VAR(btf, d, NULL, str, "bpf_cgrp_storage_busy", int, BTF_F_COMPACT, + "static int bpf_cgrp_storage_busy = (int)2", 2); } static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, diff --git a/tools/testing/selftests/bpf/prog_tests/build_id.c b/tools/testing/selftests/bpf/prog_tests/build_id.c new file mode 100644 index 
000000000000..aec9c8d6bc96 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/build_id.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> + +#include "test_build_id.skel.h" + +static char build_id[BPF_BUILD_ID_SIZE]; +static int build_id_sz; + +static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt) +{ + int i, j; + + for (i = 0; i < frame_cnt; i++) { + printf("FRAME #%02d: ", i); + switch (stack[i].status) { + case BPF_STACK_BUILD_ID_EMPTY: + printf("<EMPTY>\n"); + break; + case BPF_STACK_BUILD_ID_VALID: + printf("BUILD ID = "); + for (j = 0; j < BPF_BUILD_ID_SIZE; j++) + printf("%02hhx", (unsigned)stack[i].build_id[j]); + printf(" OFFSET = %llx", (unsigned long long)stack[i].offset); + break; + case BPF_STACK_BUILD_ID_IP: + printf("IP = %llx", (unsigned long long)stack[i].ip); + break; + default: + printf("UNEXPECTED STATUS %d ", stack[i].status); + break; + } + printf("\n"); + } +} + +static void subtest_nofault(bool build_id_resident) +{ + struct test_build_id *skel; + struct bpf_stack_build_id *stack; + int frame_cnt; + + skel = test_build_id__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->links.uprobe_nofault = bpf_program__attach(skel->progs.uprobe_nofault); + if (!ASSERT_OK_PTR(skel->links.uprobe_nofault, "link")) + goto cleanup; + + if (build_id_resident) + ASSERT_OK(system("./uprobe_multi uprobe-paged-in"), "trigger_uprobe"); + else + ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe"); + + if (!ASSERT_GT(skel->bss->res_nofault, 0, "res")) + goto cleanup; + + stack = skel->bss->stack_nofault; + frame_cnt = skel->bss->res_nofault / sizeof(struct bpf_stack_build_id); + if (env.verbosity >= VERBOSE_NORMAL) + print_stack(stack, frame_cnt); + + if (build_id_resident) { + ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status"); + ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match"); + } else { + ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_IP, "build_id_status"); + } + +cleanup: + test_build_id__destroy(skel); +} + +static void subtest_sleepable(void) +{ + struct test_build_id *skel; + struct bpf_stack_build_id *stack; + int frame_cnt; + + skel = test_build_id__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->links.uprobe_sleepable = bpf_program__attach(skel->progs.uprobe_sleepable); + if (!ASSERT_OK_PTR(skel->links.uprobe_sleepable, "link")) + goto cleanup; + + /* force build ID to not be paged in */ + ASSERT_OK(system("./uprobe_multi uprobe-paged-out"), "trigger_uprobe"); + + if (!ASSERT_GT(skel->bss->res_sleepable, 0, "res")) + goto cleanup; + + stack = skel->bss->stack_sleepable; + frame_cnt = skel->bss->res_sleepable / sizeof(struct bpf_stack_build_id); + if (env.verbosity >= VERBOSE_NORMAL) + print_stack(stack, frame_cnt); + + ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status"); + ASSERT_EQ(memcmp(stack[0].build_id, build_id, build_id_sz), 0, "build_id_match"); + +cleanup: + test_build_id__destroy(skel); +} + +void serial_test_build_id(void) +{ + build_id_sz = read_build_id("uprobe_multi", build_id, sizeof(build_id)); + ASSERT_EQ(build_id_sz, BPF_BUILD_ID_SIZE, "parse_build_id"); + + if (test__start_subtest("nofault-paged-out")) + subtest_nofault(false /* not resident */); + if (test__start_subtest("nofault-paged-in")) + subtest_nofault(true /* resident */); + if (test__start_subtest("sleepable")) + subtest_sleepable(); 
+} diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index 63ee892bc757..10224f845568 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -214,7 +214,7 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd) /* Attach to parent and child cgroup, trigger packet from child. * Assert that there is six additional runs, parent cgroup egresses and * ingress, child cgroup egresses and ingress. - * Assert that egree and ingress storages are separate. + * Assert that egress and ingress storages are separate. */ child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, child_cgroup_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c new file mode 100644 index 000000000000..9250a1e9f9af --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "test_progs.h" +#include "network_helpers.h" +#include "cgroup_helpers.h" +#include "cgroup_ancestor.skel.h" + +#define CGROUP_PATH "/skb_cgroup_test" +#define TEST_NS "cgroup_ancestor_ns" +#define NUM_CGROUP_LEVELS 4 +#define WAIT_AUTO_IP_MAX_ATTEMPT 10 +#define DST_ADDR "::1" +#define DST_PORT 1234 +#define MAX_ASSERT_NAME 32 + +struct test_data { + struct cgroup_ancestor *skel; + struct bpf_tc_hook qdisc; + struct bpf_tc_opts tc_attach; + struct nstoken *ns; +}; + +static int send_datagram(void) +{ + unsigned char buf[] = "some random test data"; + struct sockaddr_in6 addr = { .sin6_family = AF_INET6, + .sin6_port = htons(DST_PORT), }; + int sock, n; + + if (!ASSERT_EQ(inet_pton(AF_INET6, DST_ADDR, &addr.sin6_addr), 1, + "inet_pton")) + return -1; + + sock = socket(AF_INET6, SOCK_DGRAM, 0); + if (!ASSERT_OK_FD(sock, "create socket")) + return sock; + + if (!ASSERT_OK(connect(sock, &addr, sizeof(addr)), "connect")) { + close(sock); + return -1; + } + + n = sendto(sock, buf, sizeof(buf), 0, (const struct sockaddr *)&addr, + sizeof(addr)); + close(sock); + return ASSERT_EQ(n, sizeof(buf), "send data") ? 
0 : -1; +} + +static int setup_network(struct test_data *t) +{ + SYS(fail, "ip netns add %s", TEST_NS); + t->ns = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(t->ns, "open netns")) + goto cleanup_ns; + + SYS(close_ns, "ip link set lo up"); + + memset(&t->qdisc, 0, sizeof(t->qdisc)); + t->qdisc.sz = sizeof(t->qdisc); + t->qdisc.attach_point = BPF_TC_EGRESS; + t->qdisc.ifindex = if_nametoindex("lo"); + if (!ASSERT_NEQ(t->qdisc.ifindex, 0, "if_nametoindex")) + goto close_ns; + if (!ASSERT_OK(bpf_tc_hook_create(&t->qdisc), "qdisc add")) + goto close_ns; + + memset(&t->tc_attach, 0, sizeof(t->tc_attach)); + t->tc_attach.sz = sizeof(t->tc_attach); + t->tc_attach.prog_fd = bpf_program__fd(t->skel->progs.log_cgroup_id); + if (!ASSERT_OK(bpf_tc_attach(&t->qdisc, &t->tc_attach), "filter add")) + goto cleanup_qdisc; + + return 0; + +cleanup_qdisc: + bpf_tc_hook_destroy(&t->qdisc); +close_ns: + close_netns(t->ns); +cleanup_ns: + SYS_NOFAIL("ip netns del %s", TEST_NS); +fail: + return 1; +} + +static void cleanup_network(struct test_data *t) +{ + bpf_tc_detach(&t->qdisc, &t->tc_attach); + bpf_tc_hook_destroy(&t->qdisc); + close_netns(t->ns); + SYS_NOFAIL("ip netns del %s", TEST_NS); +} + +static void check_ancestors_ids(struct test_data *t) +{ + __u64 expected_ids[NUM_CGROUP_LEVELS]; + char assert_name[MAX_ASSERT_NAME]; + __u32 level; + + expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */ + expected_ids[1] = get_cgroup_id(""); + expected_ids[2] = get_cgroup_id(CGROUP_PATH); + expected_ids[3] = 0; /* non-existent cgroup */ + + for (level = 0; level < NUM_CGROUP_LEVELS; level++) { + snprintf(assert_name, MAX_ASSERT_NAME, + "ancestor id at level %d", level); + ASSERT_EQ(t->skel->bss->cgroup_ids[level], expected_ids[level], + assert_name); + } +} + +void test_cgroup_ancestor(void) +{ + struct test_data t; + int cgroup_fd; + + t.skel = cgroup_ancestor__open_and_load(); + if (!ASSERT_OK_PTR(t.skel, "open and load")) + return; + + t.skel->bss->dport = htons(DST_PORT); + cgroup_fd = cgroup_setup_and_join(CGROUP_PATH); + if (cgroup_fd < 0) + goto cleanup_progs; + + if (setup_network(&t)) + goto cleanup_cgroups; + + if (send_datagram()) + goto cleanup_network; + + check_ancestors_ids(&t); + +cleanup_network: + cleanup_network(&t); +cleanup_cgroups: + close(cgroup_fd); + cleanup_cgroup_environment(); +cleanup_progs: + cgroup_ancestor__destroy(t.skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c new file mode 100644 index 000000000000..5ab7547e38c0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_dev.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/stat.h> +#include <sys/sysmacros.h> +#include <errno.h> +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "dev_cgroup.skel.h" + +#define TEST_CGROUP "/test-bpf-based-device-cgroup/" +#define TEST_BUFFER_SIZE 64 + +static void test_mknod(const char *path, mode_t mode, int dev_major, + int dev_minor, int expected_ret, int expected_errno) +{ + int ret; + + unlink(path); + ret = mknod(path, mode, makedev(dev_major, dev_minor)); + ASSERT_EQ(ret, expected_ret, "mknod"); + if (expected_ret) + ASSERT_EQ(errno, expected_errno, "mknod errno"); + else + unlink(path); +} + +static void test_read(const char *path, char *buf, int buf_size, + int expected_ret, int expected_errno) +{ + int ret, fd; + + fd = open(path, O_RDONLY); + + /* A bare open on unauthorized device should fail */ + if (expected_ret < 0) { + ASSERT_EQ(fd, expected_ret, "open ret 
for read"); + ASSERT_EQ(errno, expected_errno, "open errno for read"); + if (fd >= 0) + close(fd); + return; + } + + if (!ASSERT_OK_FD(fd, "open ret for read")) + return; + + ret = read(fd, buf, buf_size); + ASSERT_EQ(ret, expected_ret, "read"); + + close(fd); +} + +static void test_write(const char *path, char *buf, int buf_size, + int expected_ret, int expected_errno) +{ + int ret, fd; + + fd = open(path, O_WRONLY); + + /* A bare open on unauthorized device should fail */ + if (expected_ret < 0) { + ASSERT_EQ(fd, expected_ret, "open ret for write"); + ASSERT_EQ(errno, expected_errno, "open errno for write"); + if (fd >= 0) + close(fd); + return; + } + + if (!ASSERT_OK_FD(fd, "open ret for write")) + return; + + ret = write(fd, buf, buf_size); + ASSERT_EQ(ret, expected_ret, "write"); + + close(fd); +} + +void test_cgroup_dev(void) +{ + char buf[TEST_BUFFER_SIZE] = "some random test data"; + struct dev_cgroup *skel; + int cgroup_fd; + + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch")) + return; + + skel = dev_cgroup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "load program")) + goto cleanup_cgroup; + + skel->links.bpf_prog1 = + bpf_program__attach_cgroup(skel->progs.bpf_prog1, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.bpf_prog1, "attach_program")) + goto cleanup_progs; + + if (test__start_subtest("allow-mknod")) + test_mknod("/dev/test_dev_cgroup_null", S_IFCHR, 1, 3, 0, 0); + + if (test__start_subtest("allow-read")) + test_read("/dev/urandom", buf, TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE, 0); + + if (test__start_subtest("allow-write")) + test_write("/dev/null", buf, TEST_BUFFER_SIZE, + TEST_BUFFER_SIZE, 0); + + if (test__start_subtest("deny-mknod")) + test_mknod("/dev/test_dev_cgroup_zero", S_IFCHR, 1, 5, -1, + EPERM); + + if (test__start_subtest("deny-read")) + test_read("/dev/random", buf, TEST_BUFFER_SIZE, -1, EPERM); + + if (test__start_subtest("deny-write")) + test_write("/dev/zero", buf, TEST_BUFFER_SIZE, -1, EPERM); + + if (test__start_subtest("deny-mknod-wrong-type")) + test_mknod("/dev/test_dev_cgroup_block", S_IFBLK, 1, 3, -1, + EPERM); + +cleanup_progs: + dev_cgroup__destroy(skel); +cleanup_cgroup: + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c new file mode 100644 index 000000000000..7a1643b03bf3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_get_current_cgroup_id.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/stat.h> +#include <sys/sysmacros.h> +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "get_cgroup_id_kern.skel.h" + +#define TEST_CGROUP "/test-bpf-get-cgroup-id/" + +void test_cgroup_get_current_cgroup_id(void) +{ + struct get_cgroup_id_kern *skel; + const struct timespec req = { + .tv_sec = 0, + .tv_nsec = 1, + }; + int cgroup_fd; + __u64 ucgid; + + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (!ASSERT_OK_FD(cgroup_fd, "cgroup switch")) + return; + + skel = get_cgroup_id_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "load program")) + goto cleanup_cgroup; + + if (!ASSERT_OK(get_cgroup_id_kern__attach(skel), "attach bpf program")) + goto cleanup_progs; + + skel->bss->expected_pid = getpid(); + /* trigger the syscall on which is attached the tested prog */ + if (!ASSERT_OK(syscall(__NR_nanosleep, &req, NULL), "nanosleep")) + goto cleanup_progs; + + ucgid = get_cgroup_id(TEST_CGROUP); + + 
ASSERT_EQ(skel->bss->cg_id, ucgid, "compare cgroup ids"); + +cleanup_progs: + get_cgroup_id_kern__destroy(skel); +cleanup_cgroup: + close(cgroup_fd); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c new file mode 100644 index 000000000000..cf395715ced4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_storage.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "cgroup_storage.skel.h" + +#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/" +#define TEST_NS "cgroup_storage_ns" +#define PING_CMD "ping localhost -c 1 -W 1 -q" + +static int setup_network(struct nstoken **token) +{ + SYS(fail, "ip netns add %s", TEST_NS); + *token = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(*token, "open netns")) + goto cleanup_ns; + SYS(cleanup_ns, "ip link set lo up"); + + return 0; + +cleanup_ns: + SYS_NOFAIL("ip netns del %s", TEST_NS); +fail: + return -1; +} + +static void cleanup_network(struct nstoken *ns) +{ + close_netns(ns); + SYS_NOFAIL("ip netns del %s", TEST_NS); +} + +void test_cgroup_storage(void) +{ + struct bpf_cgroup_storage_key key; + struct cgroup_storage *skel; + struct nstoken *ns = NULL; + unsigned long long value; + int cgroup_fd; + int err; + + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (!ASSERT_OK_FD(cgroup_fd, "create cgroup")) + return; + + if (!ASSERT_OK(setup_network(&ns), "setup network")) + goto cleanup_cgroup; + + skel = cgroup_storage__open_and_load(); + if (!ASSERT_OK_PTR(skel, "load program")) + goto cleanup_network; + + skel->links.bpf_prog = + bpf_program__attach_cgroup(skel->progs.bpf_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.bpf_prog, "attach program")) + goto cleanup_progs; + + /* Check that one out of every two packets is dropped */ + err = SYS_NOFAIL(PING_CMD); + ASSERT_OK(err, "first ping"); + err = SYS_NOFAIL(PING_CMD); + ASSERT_NEQ(err, 0, "second ping"); + err = SYS_NOFAIL(PING_CMD); + ASSERT_OK(err, "third ping"); + + err = bpf_map__get_next_key(skel->maps.cgroup_storage, NULL, &key, + sizeof(key)); + if (!ASSERT_OK(err, "get first key")) + goto cleanup_progs; + err = bpf_map__lookup_elem(skel->maps.cgroup_storage, &key, sizeof(key), + &value, sizeof(value), 0); + if (!ASSERT_OK(err, "first packet count read")) + goto cleanup_progs; + + /* Add one to the packet counter, check again packet filtering */ + value++; + err = bpf_map__update_elem(skel->maps.cgroup_storage, &key, sizeof(key), + &value, sizeof(value), 0); + if (!ASSERT_OK(err, "increment packet counter")) + goto cleanup_progs; + err = SYS_NOFAIL(PING_CMD); + ASSERT_OK(err, "fourth ping"); + err = SYS_NOFAIL(PING_CMD); + ASSERT_NEQ(err, 0, "fifth ping"); + err = SYS_NOFAIL(PING_CMD); + ASSERT_OK(err, "sixth ping"); + +cleanup_progs: + cgroup_storage__destroy(skel); +cleanup_network: + cleanup_network(ns); +cleanup_cgroup: + close(cgroup_fd); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index 9709c8db7275..64abba72ac10 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -9,9 +9,6 @@ static int run_test(int cgroup_fd, int server_fd, bool classid) { - struct network_helper_opts opts = { - .must_fail = true, - }; struct connect4_dropper *skel; int fd, err = 0; @@ -32,11 +29,16 @@ 
static int run_test(int cgroup_fd, int server_fd, bool classid) goto out; } - fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); - if (fd < 0) + errno = 0; + fd = connect_to_fd_opts(server_fd, NULL); + if (fd >= 0) { + log_err("Unexpected success to connect to server"); err = -1; - else close(fd); + } else if (errno != EPERM) { + log_err("Unexpected errno from connect to server"); + err = -1; + } out: connect4_dropper__destroy(skel); return err; @@ -52,7 +54,7 @@ void test_cgroup_v1v2(void) server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); if (!ASSERT_GE(server_fd, 0, "server_fd")) return; - client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); + client_fd = connect_to_fd_opts(server_fd, &opts); if (!ASSERT_GE(client_fd, 0, "client_fd")) { close(server_fd); return; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 47f42e680105..26019313e1fc 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "progs/core_reloc_types.h" #include "bpf_testmod/bpf_testmod.h" diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c new file mode 100644 index 000000000000..a18d3680fb16 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc_raw.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Test cases that can't load programs using libbpf and need direct + * BPF syscall access + */ + +#include <sys/syscall.h> +#include <bpf/libbpf.h> +#include <bpf/btf.h> + +#include "test_progs.h" +#include "test_btf.h" +#include "bpf/libbpf_internal.h" + +static char log[16 * 1024]; + +/* Check that verifier rejects BPF program containing relocation + * pointing to non-existent BTF type. + */ +static void test_bad_local_id(void) +{ + struct test_btf { + struct btf_header hdr; + __u32 types[15]; + char strings[128]; + } raw_btf = { + .hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_off = 0, + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct test_btf, strings) - + offsetof(struct test_btf, types), + .str_len = sizeof(raw_btf.strings), + }, + .types = { + BTF_PTR_ENC(0), /* [1] void* */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [2] int */ + BTF_FUNC_PROTO_ENC(2, 1), /* [3] int (*)(void*) */ + BTF_FUNC_PROTO_ARG_ENC(8, 1), + BTF_FUNC_ENC(8, 3) /* [4] FUNC 'foo' type_id=2 */ + }, + .strings = "\0int\0 0\0foo\0" + }; + __u32 log_level = 1 | 2 | 4; + LIBBPF_OPTS(bpf_btf_load_opts, opts, + .log_buf = log, + .log_size = sizeof(log), + .log_level = log_level, + ); + struct bpf_insn insns[] = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + struct bpf_func_info funcs[] = { + { + .insn_off = 0, + .type_id = 4, + } + }; + struct bpf_core_relo relos[] = { + { + .insn_off = 0, /* patch first instruction (r0 = 0) */ + .type_id = 100500, /* !!! 
this type id does not exist */ + .access_str_off = 6, /* offset of "0" */ + .kind = BPF_CORE_TYPE_ID_LOCAL, + } + }; + union bpf_attr attr; + int saved_errno; + int prog_fd = -1; + int btf_fd = -1; + + btf_fd = bpf_btf_load(&raw_btf, sizeof(raw_btf), &opts); + saved_errno = errno; + if (btf_fd < 0 || env.verbosity > VERBOSE_NORMAL) { + printf("-------- BTF load log start --------\n"); + printf("%s", log); + printf("-------- BTF load log end ----------\n"); + } + if (btf_fd < 0) { + PRINT_FAIL("bpf_btf_load() failed, errno=%d\n", saved_errno); + return; + } + + log[0] = 0; + memset(&attr, 0, sizeof(attr)); + attr.prog_btf_fd = btf_fd; + attr.prog_type = BPF_TRACE_RAW_TP; + attr.license = (__u64)"GPL"; + attr.insns = (__u64)&insns; + attr.insn_cnt = sizeof(insns) / sizeof(*insns); + attr.log_buf = (__u64)log; + attr.log_size = sizeof(log); + attr.log_level = log_level; + attr.func_info = (__u64)funcs; + attr.func_info_cnt = sizeof(funcs) / sizeof(*funcs); + attr.func_info_rec_size = sizeof(*funcs); + attr.core_relos = (__u64)relos; + attr.core_relo_cnt = sizeof(relos) / sizeof(*relos); + attr.core_relo_rec_size = sizeof(*relos); + prog_fd = sys_bpf_prog_load(&attr, sizeof(attr), 1); + saved_errno = errno; + if (prog_fd < 0 || env.verbosity > VERBOSE_NORMAL) { + printf("-------- program load log start --------\n"); + printf("%s", log); + printf("-------- program load log end ----------\n"); + } + if (prog_fd >= 0) { + PRINT_FAIL("sys_bpf_prog_load() expected to fail\n"); + goto out; + } + ASSERT_HAS_SUBSTR(log, "relo #0: bad type id 100500", "program load log"); + +out: + close(prog_fd); + close(btf_fd); +} + +void test_core_reloc_raw(void) +{ + if (test__start_subtest("bad_local_id")) + test_bad_local_id(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c index b1a3a49a822a..42bd07f7218d 100644 --- a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c @@ -4,7 +4,6 @@ #include <sys/types.h> #include <sys/socket.h> #include <net/if.h> -#include <linux/in6.h> #include <linux/if_alg.h> #include "test_progs.h" diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index 08b6391f2f56..dd75ccb03770 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -10,7 +10,8 @@ #include "bpf/btf.h" #include "bpf_util.h" #include "linux/filter.h" -#include "disasm.h" +#include "linux/kernel.h" +#include "disasm_helpers.h" #define MAX_PROG_TEXT_SZ (32 * 1024) @@ -628,63 +629,6 @@ err: return false; } -static void print_insn(void *private_data, const char *fmt, ...) -{ - va_list args; - - va_start(args, fmt); - vfprintf((FILE *)private_data, fmt, args); - va_end(args); -} - -/* Disassemble instructions to a stream */ -static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len) -{ - const struct bpf_insn_cbs cbs = { - .cb_print = print_insn, - .cb_call = NULL, - .cb_imm = NULL, - .private_data = out, - }; - bool double_insn = false; - int i; - - for (i = 0; i < len; i++) { - if (double_insn) { - double_insn = false; - continue; - } - - double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); - print_bpf_insn(&cbs, insn + i, true); - } -} - -/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix - * for each instruction (FF stands for instruction `code` byte). 
- * This function removes the prefix inplace for each line in `str`. - */ -static void remove_insn_prefix(char *str, int size) -{ - const int prefix_size = 5; - - int write_pos = 0, read_pos = prefix_size; - int len = strlen(str); - char c; - - size = min(size, len); - - while (read_pos < size) { - c = str[read_pos++]; - if (c == 0) - break; - str[write_pos++] = c; - if (c == '\n') - read_pos += prefix_size; - } - str[write_pos] = 0; -} - struct prog_info { char *prog_kind; enum bpf_prog_type prog_type; @@ -699,9 +643,10 @@ static void match_program(struct btf *btf, char *reg_map[][2], bool skip_first_insn) { - struct bpf_insn *buf = NULL; + struct bpf_insn *buf = NULL, *insn, *insn_end; int err = 0, prog_fd = 0; FILE *prog_out = NULL; + char insn_buf[64]; char *text = NULL; __u32 cnt = 0; @@ -739,12 +684,13 @@ static void match_program(struct btf *btf, PRINT_FAIL("Can't open memory stream\n"); goto out; } - if (skip_first_insn) - print_xlated(prog_out, buf + 1, cnt - 1); - else - print_xlated(prog_out, buf, cnt); + insn_end = buf + cnt; + insn = buf + (skip_first_insn ? 1 : 0); + while (insn < insn_end) { + insn = disasm_insn(insn, insn_buf, sizeof(insn_buf)); + fprintf(prog_out, "%s\n", insn_buf); + } fclose(prog_out); - remove_insn_prefix(text, MAX_PROG_TEXT_SZ); ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map), pinfo->prog_kind); diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index dcb9e5070cc3..d79f398ec6b7 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -4,7 +4,6 @@ #include <sys/types.h> #include <sys/socket.h> #include <net/if.h> -#include <linux/in6.h> #include "test_progs.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index 49b1ffc9af1f..14c91b6f1e83 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <test_progs.h> +#include "bpf_util.h" void serial_test_fexit_stress(void) { @@ -36,7 +37,7 @@ void serial_test_fexit_stress(void) for (i = 0; i < bpf_max_tramp_links; i++) { fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL", trace_program, - sizeof(trace_program) / sizeof(struct bpf_insn), + ARRAY_SIZE(trace_program), &trace_opts); if (!ASSERT_GE(fexit_fd[i], 0, "fexit load")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 6b3078dd5645..cfcc90cb7ffb 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> -#include <error.h> #include <linux/if_tun.h> #include <sys/uio.h> diff --git a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c index 37056ba73847..5a0b51157451 100644 --- a/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c +++ b/tools/testing/selftests/bpf/prog_tests/fs_kfuncs.c @@ -16,6 +16,7 @@ static void test_xattr(void) { struct test_get_xattr *skel = NULL; int fd = -1, err; + int v[32]; fd = open(testfile, O_CREAT | O_RDONLY, 0644); if (!ASSERT_GE(fd, 0, "create_file")) @@ -50,7 
+51,13 @@ static void test_xattr(void) if (!ASSERT_GE(fd, 0, "open_file")) goto out; - ASSERT_EQ(skel->bss->found_xattr, 1, "found_xattr"); + ASSERT_EQ(skel->bss->found_xattr_from_file, 1, "found_xattr_from_file"); + + /* Trigger security_inode_getxattr */ + err = getxattr(testfile, "user.kfuncs", v, sizeof(v)); + ASSERT_EQ(err, -1, "getxattr_return"); + ASSERT_EQ(errno, EINVAL, "getxattr_errno"); + ASSERT_EQ(skel->bss->found_xattr_from_dentry, 1, "found_xattr_from_dentry"); out: close(fd); diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index 3c440370c1f0..89ff23c4a8bc 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -14,6 +14,7 @@ #include "iters_state_safety.skel.h" #include "iters_looping.skel.h" #include "iters_num.skel.h" +#include "iters_testmod.skel.h" #include "iters_testmod_seq.skel.h" #include "iters_task_vma.skel.h" #include "iters_task.skel.h" @@ -297,8 +298,10 @@ void test_iters(void) RUN_TESTS(iters); RUN_TESTS(iters_css_task); - if (env.has_testmod) + if (env.has_testmod) { + RUN_TESTS(iters_testmod); RUN_TESTS(iters_testmod_seq); + } if (test__start_subtest("num")) subtest_num_iters(); diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index c07991544a78..34f8822fd221 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "kfree_skb.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 5b743212292f..f79c8e53cb3e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -68,6 +68,7 @@ static struct kfunc_test_params kfunc_tests[] = { TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"), TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"), TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"), + TC_FAIL(kfunc_call_test_pointer_arg_type_mismatch, 0, "arg#0 expected pointer to ctx, but got scalar"), /* success cases */ TC_TEST(kfunc_call_test1, 12), diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index 0f7ea4d7d9f6..27676a04d0b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -5,6 +5,7 @@ #include <bpf/btf.h> #include "test_log_buf.skel.h" +#include "bpf_util.h" static size_t libbpf_log_pos; static char libbpf_log_buf[1024 * 1024]; @@ -143,11 +144,11 @@ static void bpf_prog_load_log_buf(void) BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn); + const size_t good_prog_insn_cnt = ARRAY_SIZE(good_prog_insns); const struct bpf_insn bad_prog_insns[] = { BPF_EXIT_INSN(), }; - size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn); + size_t bad_prog_insn_cnt = ARRAY_SIZE(bad_prog_insns); LIBBPF_OPTS(bpf_prog_load_opts, opts); const size_t log_buf_sz = 1024 * 1024; char *log_buf; @@ -159,7 +160,7 @@ static void bpf_prog_load_log_buf(void) opts.log_buf = log_buf; opts.log_size = log_buf_sz; - /* with 
log_level == 0 log_buf shoud stay empty for good prog */ + /* with log_level == 0 log_buf should stay empty for good prog */ log_buf[0] = '\0'; opts.log_level = 0; fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", @@ -221,7 +222,7 @@ static void bpf_btf_load_log_buf(void) opts.log_buf = log_buf; opts.log_size = log_buf_sz; - /* with log_level == 0 log_buf shoud stay empty for good BTF */ + /* with log_level == 0 log_buf should stay empty for good BTF */ log_buf[0] = '\0'; opts.log_level = 0; fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c index 835a1d756c16..b6e8d822e8e9 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -47,7 +47,6 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_tun.h> -#include <linux/icmp.h> #include <arpa/inet.h> #include <unistd.h> #include <errno.h> diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c index 03825d2b45a8..6c50c0f63f43 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -49,6 +49,7 @@ * is not crashed, it is considered successful. */ #define NETNS "ns_lwt_reroute" +#include <netinet/in.h> #include "lwt_helpers.h" #include "network_helpers.h" #include <linux/net_tstamp.h> diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c index aa9f67eb1c95..bea05f78de5f 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c +++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c @@ -4,6 +4,7 @@ #include <bpf/btf.h> #include "bpf/libbpf_internal.h" #include "cgroup_helpers.h" +#include "bpf_util.h" static const char *module_name = "bpf_testmod"; static const char *symbol_name = "bpf_fentry_shadow_test"; @@ -100,7 +101,7 @@ void test_module_fentry_shadow(void) load_opts.attach_btf_obj_fd = btf_fd[i]; prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL", trace_program, - sizeof(trace_program) / sizeof(struct bpf_insn), + ARRAY_SIZE(trace_program), &load_opts); if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load")) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/nested_trust.c b/tools/testing/selftests/bpf/prog_tests/nested_trust.c index 39886f58924e..54a112ad5f9c 100644 --- a/tools/testing/selftests/bpf/prog_tests/nested_trust.c +++ b/tools/testing/selftests/bpf/prog_tests/nested_trust.c @@ -4,9 +4,13 @@ #include <test_progs.h> #include "nested_trust_failure.skel.h" #include "nested_trust_success.skel.h" +#include "nested_acquire.skel.h" void test_nested_trust(void) { RUN_TESTS(nested_trust_success); RUN_TESTS(nested_trust_failure); + + if (env.has_testmod) + RUN_TESTS(nested_acquire); } diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index e72d75d6baa7..c29787e092d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -11,7 +11,7 @@ #include <sched.h> #include <sys/wait.h> #include <sys/mount.h> -#include <sys/fcntl.h> +#include <fcntl.h> #include "network_helpers.h" #define STACK_SIZE (1024 * 1024) diff --git 
a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c index daa952711d8f..e9c07d561ded 100644 --- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c +++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "test_parse_tcp_hdr_opt.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c new file mode 100644 index 000000000000..509883e6823a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <test_progs.h> +#include "pro_epilogue.skel.h" +#include "epilogue_tailcall.skel.h" +#include "pro_epilogue_goto_start.skel.h" +#include "epilogue_exit.skel.h" + +struct st_ops_args { + __u64 a; +}; + +static void test_tailcall(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct epilogue_tailcall *skel; + struct st_ops_args args; + int err, prog_fd; + + skel = epilogue_tailcall__open_and_load(); + if (!ASSERT_OK_PTR(skel, "epilogue_tailcall__open_and_load")) + return; + + topts.ctx_in = &args; + topts.ctx_size_in = sizeof(args); + + skel->links.epilogue_tailcall = + bpf_map__attach_struct_ops(skel->maps.epilogue_tailcall); + if (!ASSERT_OK_PTR(skel->links.epilogue_tailcall, "attach_struct_ops")) + goto done; + + /* Both test_epilogue_tailcall and test_epilogue_subprog are + * patched with epilogue. When syscall_epilogue_tailcall() + * is run, test_epilogue_tailcall() is triggered. + * It executes a tail call and control is transferred to + * test_epilogue_subprog(). Only test_epilogue_subprog() + * does args->a += 1, thus final args.a value of 10001 + * guarantees that only the epilogue of the + * test_epilogue_subprog is executed. 
+ */ + memset(&args, 0, sizeof(args)); + prog_fd = bpf_program__fd(skel->progs.syscall_epilogue_tailcall); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "bpf_prog_test_run_opts"); + ASSERT_EQ(args.a, 10001, "args.a"); + ASSERT_EQ(topts.retval, 10001 * 2, "topts.retval"); + +done: + epilogue_tailcall__destroy(skel); +} + +void test_pro_epilogue(void) +{ + RUN_TESTS(pro_epilogue); + RUN_TESTS(pro_epilogue_goto_start); + RUN_TESTS(epilogue_exit); + if (test__start_subtest("tailcall")) + test_tailcall(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c index e2f1445b0e10..216b0dfac0fe 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c @@ -2,6 +2,7 @@ #include <test_progs.h> #include <linux/nbd.h> +#include "bpf_util.h" void test_raw_tp_writable_reject_nbd_invalid(void) { @@ -25,7 +26,7 @@ void test_raw_tp_writable_reject_nbd_invalid(void) ); bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", - program, sizeof(program) / sizeof(struct bpf_insn), + program, ARRAY_SIZE(program), &opts); if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load", "failed: %d errno %d\n", bpf_fd, errno)) diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c index f4aa7dab4766..e3668058b7bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -2,6 +2,7 @@ #include <test_progs.h> #include <linux/nbd.h> +#include "bpf_util.h" /* NOTE: conflict with other tests. 
*/ void serial_test_raw_tp_writable_test_run(void) @@ -24,7 +25,7 @@ void serial_test_raw_tp_writable_test_run(void) ); int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", - trace_program, sizeof(trace_program) / sizeof(struct bpf_insn), + trace_program, ARRAY_SIZE(trace_program), &trace_opts); if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded", "failed: %d errno %d\n", bpf_fd, errno)) @@ -41,7 +42,7 @@ void serial_test_raw_tp_writable_test_run(void) ); int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2", - skb_program, sizeof(skb_program) / sizeof(struct bpf_insn), + skb_program, ARRAY_SIZE(skb_program), &skb_opts); if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", filter_fd, errno)) diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c index 3405923fe4e6..c7b9ba8b1d06 100644 --- a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c +++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c @@ -23,6 +23,7 @@ struct read_ret_desc { { .name = "probe_read_user_str", .ret = -EFAULT }, { .name = "copy_from_user", .ret = -EFAULT }, { .name = "copy_from_user_task", .ret = -EFAULT }, + { .name = "copy_from_user_str", .ret = -EFAULT }, }; void test_read_vsyscall(void) diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c index eb74363f9f70..39d42271cc46 100644 --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -433,6 +433,19 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, y_cast = range_cast(y_t, x_t, y); + /* If we know that + * - *x* is in the range of signed 32bit value, and + * - *y_cast* range is 32-bit signed non-negative + * then *x* range can be improved with *y_cast* such that *x* range + * is 32-bit signed non-negative. Otherwise, if the new range for *x* + * allows upper 32-bit * 0xffffffff then the eventual new range for + * *x* will be out of signed 32-bit range which violates the origin + * *x* range. 
+ */ + if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX && + (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX) + return range_improve(x_t, x, y_cast); + /* the case when new range knowledge, *y*, is a 32-bit subregister * range, while previous range knowledge, *x*, is a full register * 64-bit range, needs special treatment to take into account upper 32 @@ -490,7 +503,7 @@ static const char *op_str(enum op op) /* Can register with range [x.a, x.b] *EVER* satisfy * OP (<, <=, >, >=, ==, !=) relation to - * a regsiter with range [y.a, y.b] + * a register with range [y.a, y.b] * _in *num_t* domain_ */ static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op) @@ -519,7 +532,7 @@ static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op /* Does register with range [x.a, x.b] *ALWAYS* satisfy * OP (<, <=, >, >=, ==, !=) relation to - * a regsiter with range [y.a, y.b] + * a register with range [y.a, y.b] * _in *num_t* domain_ */ static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op) @@ -530,7 +543,7 @@ static bool range_always_op(enum num_t t, struct range x, struct range y, enum o /* Does register with range [x.a, x.b] *NEVER* satisfy * OP (<, <=, >, >=, ==, !=) relation to - * a regsiter with range [y.a, y.b] + * a register with range [y.a, y.b] * _in *num_t* domain_ */ static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op) @@ -1005,11 +1018,11 @@ static int parse_reg_state(const char *s, struct reg_state *reg) * - umin=%llu, if missing, assumed 0; * - umax=%llu, if missing, assumed U64_MAX; * - smin=%lld, if missing, assumed S64_MIN; - * - smax=%lld, if missing, assummed S64_MAX; + * - smax=%lld, if missing, assumed S64_MAX; * - umin32=%d, if missing, assumed 0; * - umax32=%d, if missing, assumed U32_MAX; * - smin32=%d, if missing, assumed S32_MIN; - * - smax32=%d, if missing, assummed S32_MAX; + * - smax32=%d, if missing, assumed S32_MAX; * - var_off=(%#llx; %#llx), tnum part, we don't care about it. * * If some of the values are equal, they will be grouped (but min/max @@ -1474,7 +1487,7 @@ static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t u64 elapsed_ns = get_time_ns() - ctx->start_ns; double remain_ns = elapsed_ns / progress * (1 - progress); - fprintf(env.stderr, "PROGRESS (%s): %d/%d (%.2lf%%), " + fprintf(env.stderr_saved, "PROGRESS (%s): %d/%d (%.2lf%%), " "elapsed %llu mins (%.2lf hrs), " "ETA %.0lf mins (%.2lf hrs)\n", ctx->progress_ctx, @@ -1871,7 +1884,7 @@ cleanup: * envvar is not set, this test is skipped during test_progs testing. * * We split this up into smaller subsets based on initialization and - * conditiona numeric domains to get an easy parallelization with test_progs' + * conditional numeric domains to get an easy parallelization with test_progs' * -j argument. 
*/ @@ -1925,7 +1938,7 @@ static u64 rand_u64() { /* RAND_MAX is guaranteed to be at least 1<<15, but in practice it * seems to be 1<<31, so we need to call it thrice to get full u64; - * we'll use rougly equal split: 22 + 21 + 21 bits + * we'll use roughly equal split: 22 + 21 + 21 bits */ return ((u64)random() << 42) | (((u64)random() & RAND_21BIT_MASK) << 21) | @@ -2108,6 +2121,9 @@ static struct subtest_case crafted_cases[] = { {S32, U32, {(u32)S32_MIN, 0}, {0, 0}}, {S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}}, {S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}}, + {S64, U32, {0x0, 0x1f}, {0xffffffff80000000ULL, 0x000000007fffffffULL}}, + {S64, U32, {0x0, 0x1f}, {0xffffffffffff8000ULL, 0x0000000000007fffULL}}, + {S64, U32, {0x0, 0x1f}, {0xffffffffffffff80ULL, 0x000000000000007fULL}}, }; /* Go over crafted hard-coded cases. This is fast, so we do it as part of diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index f81d08d429a2..51544372f52e 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -103,7 +103,7 @@ static int resolve_symbols(void) btf = btf__parse_elf("btf_data.bpf.o", NULL); if (CHECK(libbpf_get_error(btf), "resolve", - "Failed to load BTF from btf_data.o\n")) + "Failed to load BTF from btf_data.bpf.o\n")) return -1; nr = btf__type_cnt(btf); diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c index 64c5f5eb2994..036d4760d2c1 100644 --- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -37,9 +37,7 @@ static int sk_fds[REUSEPORT_ARRAY_SIZE]; static int reuseport_array = -1, outer_map = -1; static enum bpf_map_type inner_map_type; static int select_by_skb_data_prog; -static int saved_tcp_syncookie = -1; static struct bpf_object *obj; -static int saved_tcp_fo = -1; static __u32 index_zero; static int epfd; @@ -193,14 +191,6 @@ static int write_int_sysctl(const char *sysctl, int v) return 0; } -static void restore_sysctls(void) -{ - if (saved_tcp_fo != -1) - write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); - if (saved_tcp_syncookie != -1) - write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); -} - static int enable_fastopen(void) { int fo; @@ -793,6 +783,7 @@ static void test_config(int sotype, sa_family_t family, bool inany) TEST_INIT(test_pass_on_err), TEST_INIT(test_detach_bpf), }; + struct netns_obj *netns; char s[MAX_TEST_NAME]; const struct test *t; @@ -808,9 +799,21 @@ static void test_config(int sotype, sa_family_t family, bool inany) if (!test__start_subtest(s)) continue; + netns = netns_new("select_reuseport", true); + if (!ASSERT_OK_PTR(netns, "netns_new")) + continue; + + if (CHECK_FAIL(enable_fastopen())) + goto out; + if (CHECK_FAIL(disable_syncookie())) + goto out; + setup_per_test(sotype, family, inany, t->no_inner_map); t->fn(sotype, family); cleanup_per_test(t->no_inner_map); + +out: + netns_free(netns); } } @@ -850,21 +853,7 @@ out: void serial_test_select_reuseport(void) { - saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); - if (saved_tcp_fo < 0) - goto out; - saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL); - if (saved_tcp_syncookie < 0) - goto out; - - if (enable_fastopen()) - goto out; - if (disable_syncookie()) - goto out; - test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); test_map_type(BPF_MAP_TYPE_SOCKMAP); 
test_map_type(BPF_MAP_TYPE_SOCKHASH); -out: - restore_sysctls(); } diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index ae87c00867ba..023c31bde229 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -18,7 +18,6 @@ #include <arpa/inet.h> #include <assert.h> #include <errno.h> -#include <error.h> #include <fcntl.h> #include <sched.h> #include <stdio.h> @@ -47,8 +46,6 @@ #define INT_IP6 "fd00::2" #define INT_PORT 8008 -#define IO_TIMEOUT_SEC 3 - enum server { SERVER_A = 0, SERVER_B = 1, @@ -108,46 +105,6 @@ static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog) return 0; } -static socklen_t inetaddr_len(const struct sockaddr_storage *addr) -{ - return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) : - addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0); -} - -static int make_socket(int sotype, const char *ip, int port, - struct sockaddr_storage *addr) -{ - struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC }; - int err, family, fd; - - family = is_ipv6(ip) ? AF_INET6 : AF_INET; - err = make_sockaddr(family, ip, port, addr, NULL); - if (CHECK(err, "make_address", "failed\n")) - return -1; - - fd = socket(addr->ss_family, sotype, 0); - if (CHECK(fd < 0, "socket", "failed\n")) { - log_err("failed to make socket"); - return -1; - } - - err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); - if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) { - log_err("failed to set SNDTIMEO"); - close(fd); - return -1; - } - - err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); - if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) { - log_err("failed to set RCVTIMEO"); - close(fd); - return -1; - } - - return fd; -} - static int setsockopts(int fd, void *opts) { struct cb_opts *co = (struct cb_opts *)opts; @@ -229,27 +186,6 @@ fail: return -1; } -static int make_client(int sotype, const char *ip, int port) -{ - struct sockaddr_storage addr = {0}; - int err, fd; - - fd = make_socket(sotype, ip, port, &addr); - if (fd < 0) - return -1; - - err = connect(fd, (void *)&addr, inetaddr_len(&addr)); - if (CHECK(err, "make_client", "connect")) { - log_err("failed to connect client socket"); - goto fail; - } - - return fd; -fail: - close(fd); - return -1; -} - static __u64 socket_cookie(int fd) { __u64 cookie; @@ -646,8 +582,9 @@ static void run_lookup_prog(const struct test *t) goto close; } - client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); - if (client_fd < 0) + client_fd = connect_to_addr_str(is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET, + t->sotype, t->connect_to.ip, t->connect_to.port, NULL); + if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str")) goto close; if (t->sotype == SOCK_STREAM) @@ -862,9 +799,11 @@ static void test_redirect_lookup(struct test_sk_lookup *skel) static void drop_on_lookup(const struct test *t) { + int family = is_ipv6(t->connect_to.ip) ? 
AF_INET6 : AF_INET; struct sockaddr_storage dst = {}; int client_fd, server_fd, err; struct bpf_link *lookup_link; + socklen_t len; ssize_t n; lookup_link = attach_lookup_prog(t->lookup_prog); @@ -876,12 +815,14 @@ static void drop_on_lookup(const struct test *t) if (server_fd < 0) goto detach; - client_fd = make_socket(t->sotype, t->connect_to.ip, - t->connect_to.port, &dst); - if (client_fd < 0) + client_fd = client_socket(family, t->sotype, NULL); + if (!ASSERT_OK_FD(client_fd, "client_socket")) goto close_srv; - err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len); + if (!ASSERT_OK(err, "make_sockaddr")) + goto close_all; + err = connect(client_fd, (void *)&dst, len); if (t->sotype == SOCK_DGRAM) { err = send_byte(client_fd); if (err) @@ -976,9 +917,11 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel) static void drop_on_reuseport(const struct test *t) { + int family = is_ipv6(t->connect_to.ip) ? AF_INET6 : AF_INET; struct sockaddr_storage dst = { 0 }; int client, server1, server2, err; struct bpf_link *lookup_link; + socklen_t len; ssize_t n; lookup_link = attach_lookup_prog(t->lookup_prog); @@ -1000,12 +943,14 @@ static void drop_on_reuseport(const struct test *t) if (server2 < 0) goto close_srv1; - client = make_socket(t->sotype, t->connect_to.ip, - t->connect_to.port, &dst); - if (client < 0) + client = client_socket(family, t->sotype, NULL); + if (!ASSERT_OK_FD(client, "client_socket")) goto close_srv2; - err = connect(client, (void *)&dst, inetaddr_len(&dst)); + err = make_sockaddr(family, t->connect_to.ip, t->connect_to.port, &dst, &len); + if (!ASSERT_OK(err, "make_sockaddr")) + goto close_all; + err = connect(client, (void *)&dst, len); if (t->sotype == SOCK_DGRAM) { err = send_byte(client); if (err) @@ -1152,8 +1097,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel, if (server_fd < 0) return; - connected_fd = make_client(sotype, EXT_IP4, EXT_PORT); - if (connected_fd < 0) + connected_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL); + if (!ASSERT_OK_FD(connected_fd, "connect_to_addr_str")) goto out_close_server; /* Put a connected socket in redirect map */ @@ -1166,8 +1111,8 @@ static void run_sk_assign_connected(struct test_sk_lookup *skel, goto out_close_connected; /* Try to redirect TCP SYN / UDP packet to a connected socket */ - client_fd = make_client(sotype, EXT_IP4, EXT_PORT); - if (client_fd < 0) + client_fd = connect_to_addr_str(AF_INET, sotype, EXT_IP4, EXT_PORT, NULL); + if (!ASSERT_OK_FD(client_fd, "connect_to_addr_str")) goto out_unlink_prog; if (sotype == SOCK_DGRAM) { send_byte(client_fd); @@ -1219,6 +1164,7 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t) int map_fd, server_fd, client_fd; struct bpf_link *link1, *link2; int prog_idx, done, err; + socklen_t len; map_fd = bpf_map__fd(t->run_map); @@ -1248,11 +1194,14 @@ static void run_multi_prog_lookup(const struct test_multi_prog *t) if (err) goto out_close_server; - client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst); - if (client_fd < 0) + client_fd = client_socket(AF_INET, SOCK_STREAM, NULL); + if (!ASSERT_OK_FD(client_fd, "client_socket")) goto out_close_server; - err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + err = make_sockaddr(AF_INET, EXT_IP4, EXT_PORT, &dst, &len); + if (!ASSERT_OK(err, "make_sockaddr")) + goto out_close_client; + err = connect(client_fd, (void *)&dst, len); if (CHECK(err && !t->expect_errno, "connect", 
"unexpected error %d\n", errno)) goto out_close_client; diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c index b880c564a204..a6ee7f8d4f79 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_addr.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c @@ -2642,6 +2642,7 @@ void test_sock_addr(void) break; default: ASSERT_TRUE(false, "Unknown sock addr test type"); + err = -EINVAL; break; } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index da5a6fb03b69..4ee1148d22be 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1836,6 +1836,7 @@ static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map int family) { const char *family_name, *map_name; + struct netns_obj *netns; char s[MAX_TEST_NAME]; family_name = family_str(family); @@ -1843,8 +1844,15 @@ static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); if (!test__start_subtest(s)) return; + + netns = netns_new("sockmap_listen", true); + if (!ASSERT_OK_PTR(netns, "netns_new")) + return; + inet_unix_skb_redir_to_connected(skel, map, family); unix_inet_skb_redir_to_connected(skel, map, family); + + netns_free(netns); } static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 59993fc9c0d7..21c5a37846ad 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -3,7 +3,10 @@ #include <test_progs.h> #include <network_helpers.h> #include "tailcall_poke.skel.h" - +#include "tailcall_bpf2bpf_hierarchy2.skel.h" +#include "tailcall_bpf2bpf_hierarchy3.skel.h" +#include "tailcall_freplace.skel.h" +#include "tc_bpf2bpf.skel.h" /* test_tailcall_1 checks basic functionality by patching multiple locations * in a single program for a single tail call slot with nop->jmp, jmp->nop @@ -1187,6 +1190,372 @@ out: tailcall_poke__destroy(call); } +static void test_tailcall_hierarchy_count(const char *which, bool test_fentry, + bool test_fexit, + bool test_fentry_entry) +{ + int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val; + struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL; + struct bpf_link *fentry_link = NULL, *fexit_link = NULL; + struct bpf_program *prog, *fentry_prog; + struct bpf_map *prog_array, *data_map; + int fentry_prog_fd; + char buff[128] = {}; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (!ASSERT_OK(err, "load obj")) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (!ASSERT_OK_PTR(prog, "find entry prog")) + goto out; + + prog_fd = bpf_program__fd(prog); + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) + goto out; + + if (test_fentry_entry) { + fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o", + NULL); + if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file")) + goto out; + + fentry_prog = bpf_object__find_program_by_name(fentry_obj, + "fentry"); + if (!ASSERT_OK_PTR(prog, "find fentry prog")) + goto out; + + err = bpf_program__set_attach_target(fentry_prog, 
prog_fd, + "entry"); + if (!ASSERT_OK(err, "set_attach_target entry")) + goto out; + + err = bpf_object__load(fentry_obj); + if (!ASSERT_OK(err, "load fentry_obj")) + goto out; + + fentry_link = bpf_program__attach_trace(fentry_prog); + if (!ASSERT_OK_PTR(fentry_link, "attach_trace")) + goto out; + + fentry_prog_fd = bpf_program__fd(fentry_prog); + if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd")) + goto out; + + prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table"); + if (!ASSERT_OK_PTR(prog_array, "find jmp_table")) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + data_map = bpf_object__find_map_by_name(fentry_obj, ".bss"); + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), + "find data_map")) + goto out; + + } else { + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (!ASSERT_OK_PTR(prog_array, "find jmp_table")) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + data_map = bpf_object__find_map_by_name(obj, ".bss"); + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), + "find data_map")) + goto out; + } + + if (test_fentry) { + fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o", + NULL); + if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file")) + goto out; + + prog = bpf_object__find_program_by_name(fentry_obj, "fentry"); + if (!ASSERT_OK_PTR(prog, "find fentry prog")) + goto out; + + err = bpf_program__set_attach_target(prog, prog_fd, + "subprog_tail"); + if (!ASSERT_OK(err, "set_attach_target subprog_tail")) + goto out; + + err = bpf_object__load(fentry_obj); + if (!ASSERT_OK(err, "load fentry_obj")) + goto out; + + fentry_link = bpf_program__attach_trace(prog); + if (!ASSERT_OK_PTR(fentry_link, "attach_trace")) + goto out; + } + + if (test_fexit) { + fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o", + NULL); + if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file")) + goto out; + + prog = bpf_object__find_program_by_name(fexit_obj, "fexit"); + if (!ASSERT_OK_PTR(prog, "find fexit prog")) + goto out; + + err = bpf_program__set_attach_target(prog, prog_fd, + "subprog_tail"); + if (!ASSERT_OK(err, "set_attach_target subprog_tail")) + goto out; + + err = bpf_object__load(fexit_obj); + if (!ASSERT_OK(err, "load fexit_obj")) + goto out; + + fexit_link = bpf_program__attach_trace(prog); + if (!ASSERT_OK_PTR(fexit_link, "attach_trace")) + goto out; + } + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + main_data_fd = bpf_map__fd(data_map); + if (!ASSERT_GE(main_data_fd, 0, "main_data_fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(main_data_fd, &i, &val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 34, "tailcall count"); + + if (test_fentry) { + data_map = bpf_object__find_map_by_name(fentry_obj, ".bss"); + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), + "find tailcall_bpf2bpf_fentry.bss map")) + goto out; + + fentry_data_fd = bpf_map__fd(data_map); + if (!ASSERT_GE(fentry_data_fd, 0, + "find tailcall_bpf2bpf_fentry.bss map fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(fentry_data_fd, &i, &val); + ASSERT_OK(err, 
"fentry count"); + ASSERT_EQ(val, 68, "fentry count"); + } + + if (test_fexit) { + data_map = bpf_object__find_map_by_name(fexit_obj, ".bss"); + if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map), + "find tailcall_bpf2bpf_fexit.bss map")) + goto out; + + fexit_data_fd = bpf_map__fd(data_map); + if (!ASSERT_GE(fexit_data_fd, 0, + "find tailcall_bpf2bpf_fexit.bss map fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(fexit_data_fd, &i, &val); + ASSERT_OK(err, "fexit count"); + ASSERT_EQ(val, 68, "fexit count"); + } + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (!ASSERT_OK(err, "delete_elem from jmp_table")) + goto out; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + i = 0; + err = bpf_map_lookup_elem(main_data_fd, &i, &val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 35, "tailcall count"); + + if (test_fentry) { + i = 0; + err = bpf_map_lookup_elem(fentry_data_fd, &i, &val); + ASSERT_OK(err, "fentry count"); + ASSERT_EQ(val, 70, "fentry count"); + } + + if (test_fexit) { + i = 0; + err = bpf_map_lookup_elem(fexit_data_fd, &i, &val); + ASSERT_OK(err, "fexit count"); + ASSERT_EQ(val, 70, "fexit count"); + } + +out: + bpf_link__destroy(fentry_link); + bpf_link__destroy(fexit_link); + bpf_object__close(fentry_obj); + bpf_object__close(fexit_obj); + bpf_object__close(obj); +} + +/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail + * call limit enforcement matches with expectations when tailcalls are preceded + * with two bpf2bpf calls. + * + * subprog --tailcall-> entry + * entry < + * subprog --tailcall-> entry + */ +static void test_tailcall_bpf2bpf_hierarchy_1(void) +{ + test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", + false, false, false); +} + +/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the + * tail call limit enforcement matches with expectations when tailcalls are + * preceded with two bpf2bpf calls, and the two subprogs are traced by fentry. + */ +static void test_tailcall_bpf2bpf_hierarchy_fentry(void) +{ + test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", + true, false, false); +} + +/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the tail + * call limit enforcement matches with expectations when tailcalls are preceded + * with two bpf2bpf calls, and the two subprogs are traced by fexit. + */ +static void test_tailcall_bpf2bpf_hierarchy_fexit(void) +{ + test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", + false, true, false); +} + +/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value of + * the tail call limit enforcement matches with expectations when tailcalls are + * preceded with two bpf2bpf calls, and the two subprogs are traced by both + * fentry and fexit. + */ +static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void) +{ + test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o", + true, true, false); +} + +/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value of + * the tail call limit enforcement matches with expectations when tailcalls are + * preceded with two bpf2bpf calls in fentry. 
+ */ +static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void) +{ + test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true); +} + +/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail + * call limit enforcement matches with expectations: + * + * subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0 + * entry < + * subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1 + */ +static void test_tailcall_bpf2bpf_hierarchy_2(void) +{ + RUN_TESTS(tailcall_bpf2bpf_hierarchy2); +} + +/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail + * call limit enforcement matches with expectations: + * + * subprog with jmp_table0 to classifier_0 + * entry --tailcall-> classifier_0 < + * subprog with jmp_table1 to classifier_0 + */ +static void test_tailcall_bpf2bpf_hierarchy_3(void) +{ + RUN_TESTS(tailcall_bpf2bpf_hierarchy3); +} + +/* test_tailcall_freplace checks that the attached freplace prog is OK to + * update the prog_array map. + */ +static void test_tailcall_freplace(void) +{ + struct tailcall_freplace *freplace_skel = NULL; + struct bpf_link *freplace_link = NULL; + struct bpf_program *freplace_prog; + struct tc_bpf2bpf *tc_skel = NULL; + int prog_fd, map_fd; + char buff[128] = {}; + int err, key; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + freplace_skel = tailcall_freplace__open(); + if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) + return; + + tc_skel = tc_bpf2bpf__open_and_load(); + if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) + goto out; + + prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); + freplace_prog = freplace_skel->progs.entry_freplace; + err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog"); + if (!ASSERT_OK(err, "set_attach_target")) + goto out; + + err = tailcall_freplace__load(freplace_skel); + if (!ASSERT_OK(err, "tailcall_freplace__load")) + goto out; + + freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd, + "subprog"); + if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) + goto out; + + map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); + prog_fd = bpf_program__fd(freplace_prog); + key = 0; + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 34, "test_run retval"); + +out: + bpf_link__destroy(freplace_link); + tc_bpf2bpf__destroy(tc_skel); + tailcall_freplace__destroy(freplace_skel); +} + void test_tailcalls(void) { if (test__start_subtest("tailcall_1")) @@ -1223,4 +1592,18 @@ void test_tailcalls(void) test_tailcall_bpf2bpf_fentry_entry(); if (test__start_subtest("tailcall_poke")) test_tailcall_poke(); + if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1")) + test_tailcall_bpf2bpf_hierarchy_1(); + if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry")) + test_tailcall_bpf2bpf_hierarchy_fentry(); + if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit")) + test_tailcall_bpf2bpf_hierarchy_fexit(); + if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit")) + test_tailcall_bpf2bpf_hierarchy_fentry_fexit(); + if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry")) + test_tailcall_bpf2bpf_hierarchy_fentry_entry(); + test_tailcall_bpf2bpf_hierarchy_2(); + test_tailcall_bpf2bpf_hierarchy_3(); + if 
(test__start_subtest("tailcall_freplace")) + test_tailcall_freplace(); } diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c index 196abf223465..f77f604389aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -2384,7 +2384,7 @@ static int generate_dummy_prog(void) BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns); LIBBPF_OPTS(bpf_prog_load_opts, opts); const size_t log_buf_sz = 256; char log_buf[log_buf_sz]; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 327d51f59142..c85798966aec 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -68,6 +68,7 @@ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL}; +static struct netns_obj *netns_objs[3]; static int write_file(const char *path, const char *newval) { @@ -87,27 +88,41 @@ static int write_file(const char *path, const char *newval) static int netns_setup_namespaces(const char *verb) { + struct netns_obj **ns_obj = netns_objs; const char * const *ns = namespaces; - char cmd[128]; while (*ns) { - snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns); - if (!ASSERT_OK(system(cmd), cmd)) - return -1; + if (strcmp(verb, "add") == 0) { + *ns_obj = netns_new(*ns, false); + if (!ASSERT_OK_PTR(*ns_obj, "netns_new")) + return -1; + } else { + if (!ASSERT_OK_PTR(*ns_obj, "netns_obj is NULL")) + return -1; + netns_free(*ns_obj); + *ns_obj = NULL; + } ns++; + ns_obj++; } return 0; } static void netns_setup_namespaces_nofail(const char *verb) { + struct netns_obj **ns_obj = netns_objs; const char * const *ns = namespaces; - char cmd[128]; while (*ns) { - snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns); - system(cmd); + if (strcmp(verb, "add") == 0) { + *ns_obj = netns_new(*ns, false); + } else { + if (*ns_obj) + netns_free(*ns_obj); + *ns_obj = NULL; + } ns++; + ns_obj++; } } @@ -471,7 +486,7 @@ static int set_forwarding(bool enable) static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp) { - struct __kernel_timespec pkt_ts = {}; + struct timespec pkt_ts = {}; char ctl[CMSG_SPACE(sizeof(pkt_ts))]; struct timespec now_ts; struct msghdr msg = {}; @@ -495,7 +510,7 @@ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp) cmsg = CMSG_FIRSTHDR(&msg); if (cmsg && cmsg->cmsg_level == SOL_SOCKET && - cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) + cmsg->cmsg_type == SO_TIMESTAMPNS) memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts)); pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; @@ -537,9 +552,9 @@ static int wait_netstamp_needed_key(void) if (!ASSERT_GE(srv_fd, 0, "start_server")) goto done; - err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done; cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS); @@ -621,9 +636,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port) return; /* Ensure the kernel puts the (rcv) timestamp for all skb */ - err = setsockopt(listen_fd, SOL_SOCKET, 
SO_TIMESTAMPNS_NEW, + err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done; if (type == SOCK_STREAM) { @@ -857,7 +872,7 @@ static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t); /* fwdns_prio100 prog does not read delivery_time_type, so - * kernel puts the (rcv) timetamp in __sk_buff->tstamp + * kernel puts the (rcv) timestamp in __sk_buff->tstamp */ ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, dtime_cnt_str(t, INGRESS_FWDNS_P100)); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index f2b99d95d916..c38784c1c066 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c index 2900c5e9a016..1750c29b94f8 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c @@ -38,11 +38,7 @@ void test_bpf_syscall_macro(void) /* check whether args of syscall are copied correctly */ prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5); -#if defined(__aarch64__) || defined(__s390__) - ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); -#else ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); -#endif ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2"); ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3"); /* it cannot copy arg4 when uses PT_REGS_PARM4 on x86_64 */ diff --git a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c index a0054019e677..9c0200c132d9 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c @@ -51,7 +51,7 @@ static int run_set_secureexec(int map_fd, int secureexec) exit(ret); /* If the binary is executed with securexec=1, the dynamic - * loader ingores and unsets certain variables like LD_PRELOAD, + * loader ignores and unsets certain variables like LD_PRELOAD, * TMPDIR etc. TMPDIR is used here to simplify the example, as * LD_PRELOAD requires a real .so file. 
* diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c index 16175d579bc7..2a27f3714f5c 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c +++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c @@ -12,6 +12,7 @@ #include <stdlib.h> #include "lsm.skel.h" +#include "lsm_tailcall.skel.h" char *CMD_ARGS[] = {"true", NULL}; @@ -95,7 +96,7 @@ static int test_lsm(struct lsm *skel) return 0; } -void test_test_lsm(void) +static void test_lsm_basic(void) { struct lsm *skel = NULL; int err; @@ -114,3 +115,46 @@ void test_test_lsm(void) close_prog: lsm__destroy(skel); } + +static void test_lsm_tailcall(void) +{ + struct lsm_tailcall *skel = NULL; + int map_fd, prog_fd; + int err, key; + + skel = lsm_tailcall__open_and_load(); + if (!ASSERT_OK_PTR(skel, "lsm_tailcall__skel_load")) + goto close_prog; + + map_fd = bpf_map__fd(skel->maps.jmp_table); + if (CHECK_FAIL(map_fd < 0)) + goto close_prog; + + prog_fd = bpf_program__fd(skel->progs.lsm_file_permission_prog); + if (CHECK_FAIL(prog_fd < 0)) + goto close_prog; + + key = 0; + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (CHECK_FAIL(!err)) + goto close_prog; + + prog_fd = bpf_program__fd(skel->progs.lsm_file_alloc_security_prog); + if (CHECK_FAIL(prog_fd < 0)) + goto close_prog; + + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto close_prog; + +close_prog: + lsm_tailcall__destroy(skel); +} + +void test_test_lsm(void) +{ + if (test__start_subtest("lsm_basic")) + test_lsm_basic(); + if (test__start_subtest("lsm_tailcall")) + test_lsm_tailcall(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c new file mode 100644 index 000000000000..ce745776ed18 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_mmap_inner_array.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <sys/mman.h> +#include "mmap_inner_array.skel.h" + +void test_mmap_inner_array(void) +{ + const long page_size = sysconf(_SC_PAGE_SIZE); + struct mmap_inner_array *skel; + int inner_array_fd, err; + void *tmp; + __u64 *val; + + skel = mmap_inner_array__open_and_load(); + + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + inner_array_fd = bpf_map__fd(skel->maps.inner_array); + tmp = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, inner_array_fd, 0); + if (!ASSERT_OK_PTR(tmp, "inner array mmap")) + goto out; + val = (void *)tmp; + + err = mmap_inner_array__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto out_unmap; + + skel->bss->pid = getpid(); + usleep(1); + + /* pid is set, pid_match == true and outer_map_match == false */ + ASSERT_TRUE(skel->bss->pid_match, "pid match 1"); + ASSERT_FALSE(skel->bss->outer_map_match, "outer map match 1"); + ASSERT_FALSE(skel->bss->done, "done 1"); + ASSERT_EQ(*val, 0, "value match 1"); + + err = bpf_map__update_elem(skel->maps.outer_map, + &skel->bss->pid, sizeof(skel->bss->pid), + &inner_array_fd, sizeof(inner_array_fd), + BPF_ANY); + if (!ASSERT_OK(err, "update elem")) + goto out_unmap; + usleep(1); + + /* outer map key is set, outer_map_match == true */ + ASSERT_TRUE(skel->bss->pid_match, "pid match 2"); + ASSERT_TRUE(skel->bss->outer_map_match, "outer map match 2"); + ASSERT_TRUE(skel->bss->done, "done 2"); + ASSERT_EQ(*val, skel->data->match_value, "value match 2"); + +out_unmap: + munmap(tmp, page_size); +out: + mmap_inner_array__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c index 7ddd6615b7e7..baceb0de9d49 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c +++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c @@ -72,7 +72,7 @@ static void test_strncmp_ret(void) got = trigger_strncmp(skel); ASSERT_EQ(got, 0, "strncmp: same str"); - /* Not-null-termainted string */ + /* Not-null-terminated string */ memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str)); skel->bss->str[sizeof(skel->bss->str) - 1] = 'A'; got = trigger_strncmp(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c index bbcf12696a6b..75a0dea511b3 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -9,6 +9,7 @@ #include "struct_ops_nulled_out_cb.skel.h" #include "struct_ops_forgotten_cb.skel.h" #include "struct_ops_detach.skel.h" +#include "unsupported_ops.skel.h" static void check_map_info(struct bpf_map_info *info) { @@ -311,5 +312,6 @@ void serial_test_struct_ops_module(void) test_struct_ops_forgotten_cb(); if (test__start_subtest("test_detach_link")) test_detach_link(); + RUN_TESTS(unsupported_ops); } diff --git a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c new file mode 100644 index 000000000000..8d75424fe6bc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Create 3 namespaces with 3 veth peers, and forward packets in-between using + * native XDP + * + * XDP_TX + * NS1(veth11) NS2(veth22) NS3(veth33) + * | | | + * | | | + * (veth1, (veth2, (veth3, + * id:111) id:122) id:133) + * ^ | ^ | ^ | + * | | XDP_REDIRECT | | XDP_REDIRECT | | + * | 
------------------ ------------------ | + * ----------------------------------------- + * XDP_REDIRECT + */ + +#define _GNU_SOURCE +#include <net/if.h> +#include "test_progs.h" +#include "network_helpers.h" +#include "xdp_dummy.skel.h" +#include "xdp_redirect_map.skel.h" +#include "xdp_tx.skel.h" + +#define VETH_PAIRS_COUNT 3 +#define NS_SUFFIX_LEN 6 +#define VETH_NAME_MAX_LEN 16 +#define IP_SRC "10.1.1.11" +#define IP_DST "10.1.1.33" +#define IP_CMD_MAX_LEN 128 + +struct skeletons { + struct xdp_dummy *xdp_dummy; + struct xdp_tx *xdp_tx; + struct xdp_redirect_map *xdp_redirect_maps; +}; + +struct veth_configuration { + char local_veth[VETH_NAME_MAX_LEN]; /* Interface in main namespace */ + char remote_veth[VETH_NAME_MAX_LEN]; /* Peer interface in dedicated namespace*/ + const char *namespace; /* Namespace for the remote veth */ + char next_veth[VETH_NAME_MAX_LEN]; /* Local interface to redirect traffic to */ + char *remote_addr; /* IP address of the remote veth */ +}; + +static struct veth_configuration config[VETH_PAIRS_COUNT] = { + { + .local_veth = "veth1", + .remote_veth = "veth11", + .next_veth = "veth2", + .remote_addr = IP_SRC, + .namespace = "ns-veth11" + }, + { + .local_veth = "veth2", + .remote_veth = "veth22", + .next_veth = "veth3", + .remote_addr = NULL, + .namespace = "ns-veth22" + }, + { + .local_veth = "veth3", + .remote_veth = "veth33", + .next_veth = "veth1", + .remote_addr = IP_DST, + .namespace = "ns-veth33" + } +}; + +static int attach_programs_to_veth_pair(struct skeletons *skeletons, int index) +{ + struct bpf_program *local_prog, *remote_prog; + struct bpf_link **local_link, **remote_link; + struct nstoken *nstoken; + struct bpf_link *link; + int interface; + + switch (index) { + case 0: + local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_0; + local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_0; + remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog; + remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog; + break; + case 1: + local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_1; + local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_1; + remote_prog = skeletons->xdp_tx->progs.xdp_tx; + remote_link = &skeletons->xdp_tx->links.xdp_tx; + break; + case 2: + local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_2; + local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_2; + remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog; + remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog; + break; + } + interface = if_nametoindex(config[index].local_veth); + if (!ASSERT_NEQ(interface, 0, "non zero interface index")) + return -1; + link = bpf_program__attach_xdp(local_prog, interface); + if (!ASSERT_OK_PTR(link, "attach xdp program to local veth")) + return -1; + *local_link = link; + nstoken = open_netns(config[index].namespace); + if (!ASSERT_OK_PTR(nstoken, "switch to remote veth namespace")) + return -1; + interface = if_nametoindex(config[index].remote_veth); + if (!ASSERT_NEQ(interface, 0, "non zero interface index")) { + close_netns(nstoken); + return -1; + } + link = bpf_program__attach_xdp(remote_prog, interface); + *remote_link = link; + close_netns(nstoken); + if (!ASSERT_OK_PTR(link, "attach xdp program to remote veth")) + return -1; + + return 0; +} + +static int configure_network(struct skeletons *skeletons) +{ + int interface_id; + int map_fd; + int err; + int i = 0; + + /* First create and configure all interfaces */ + for (i = 0; i < VETH_PAIRS_COUNT; i++) { 
+ SYS(fail, "ip netns add %s", config[i].namespace); + SYS(fail, "ip link add %s type veth peer name %s netns %s", + config[i].local_veth, config[i].remote_veth, config[i].namespace); + SYS(fail, "ip link set dev %s up", config[i].local_veth); + if (config[i].remote_addr) + SYS(fail, "ip -n %s addr add %s/24 dev %s", config[i].namespace, + config[i].remote_addr, config[i].remote_veth); + SYS(fail, "ip -n %s link set dev %s up", config[i].namespace, + config[i].remote_veth); + } + + /* Then configure the redirect map and attach programs to interfaces */ + map_fd = bpf_map__fd(skeletons->xdp_redirect_maps->maps.tx_port); + if (!ASSERT_GE(map_fd, 0, "open redirect map")) + goto fail; + for (i = 0; i < VETH_PAIRS_COUNT; i++) { + interface_id = if_nametoindex(config[i].next_veth); + if (!ASSERT_NEQ(interface_id, 0, "non zero interface index")) + goto fail; + err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY); + if (!ASSERT_OK(err, "configure interface redirection through map")) + goto fail; + if (attach_programs_to_veth_pair(skeletons, i)) + goto fail; + } + + return 0; + +fail: + return -1; +} + +static void cleanup_network(void) +{ + int i; + + /* Deleting namespaces is enough to automatically remove veth pairs as well + */ + for (i = 0; i < VETH_PAIRS_COUNT; i++) + SYS_NOFAIL("ip netns del %s", config[i].namespace); +} + +static int check_ping(struct skeletons *skeletons) +{ + /* Test: if all interfaces are properly configured, we must be able to ping + * veth33 from veth11 + */ + return SYS_NOFAIL("ip netns exec %s ping -c 1 -W 1 %s > /dev/null", + config[0].namespace, IP_DST); +} + +void test_xdp_veth_redirect(void) +{ + struct skeletons skeletons = {}; + + skeletons.xdp_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load")) + return; + + skeletons.xdp_tx = xdp_tx__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load")) + goto destroy_xdp_dummy; + + skeletons.xdp_redirect_maps = xdp_redirect_map__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_redirect_maps, "xdp_redirect_map__open_and_load")) + goto destroy_xdp_tx; + + if (configure_network(&skeletons)) + goto destroy_xdp_redirect_map; + + ASSERT_OK(check_ping(&skeletons), "ping"); + +destroy_xdp_redirect_map: + xdp_redirect_map__destroy(skeletons.xdp_redirect_maps); +destroy_xdp_tx: + xdp_tx__destroy(skeletons.xdp_tx); +destroy_xdp_dummy: + xdp_dummy__destroy(skeletons.xdp_dummy); + + cleanup_network(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c index fc4a175d8d76..fe86e4fdb89c 100644 --- a/tools/testing/selftests/bpf/prog_tests/token.c +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -867,7 +867,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel } unsetenv(TOKEN_ENVVAR); - /* now the same struct_ops skeleton should succeed thanks to libppf + /* now the same struct_ops skeleton should succeed thanks to libbpf * creating BPF token from /sys/fs/bpf mount point */ skel = dummy_st_ops_success__open_and_load(); @@ -929,7 +929,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l if (!ASSERT_OK(err, "setenv_token_path")) goto err_out; - /* now the same struct_ops skeleton should succeed thanks to libppf + /* now the same struct_ops skeleton should succeed thanks to libbpf * creating BPF token from custom mount point */ skel = dummy_st_ops_success__open_and_load(); diff --git 
a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c index 0adf8d9475cb..472f4f9fa95f 100644 --- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -7,6 +7,7 @@ #include "test_unpriv_bpf_disabled.skel.h" #include "cap_helpers.h" +#include "bpf_util.h" /* Using CAP_LAST_CAP is risky here, since it can get pulled in from * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result @@ -146,7 +147,7 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + const size_t prog_insn_cnt = ARRAY_SIZE(prog_insns); LIBBPF_OPTS(bpf_prog_load_opts, load_opts); struct bpf_map_info map_info = {}; __u32 map_info_len = sizeof(map_info); diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index bf6ca8e3eb13..844f6fc8487b 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -6,6 +6,8 @@ #include "uprobe_multi.skel.h" #include "uprobe_multi_bench.skel.h" #include "uprobe_multi_usdt.skel.h" +#include "uprobe_multi_consumers.skel.h" +#include "uprobe_multi_pid_filter.skel.h" #include "bpf/libbpf_internal.h" #include "testing_helpers.h" #include "../sdt.h" @@ -38,6 +40,7 @@ struct child { int pid; int tid; pthread_t thread; + char stack[65536]; }; static void release_child(struct child *child) @@ -67,41 +70,54 @@ static void kick_child(struct child *child) fflush(NULL); } -static struct child *spawn_child(void) +static int child_func(void *arg) { - static struct child child; - int err; - int c; - - /* pipe to notify child to execute the trigger functions */ - if (pipe(child.go)) - return NULL; + struct child *child = arg; + int err, c; - child.pid = child.tid = fork(); - if (child.pid < 0) { - release_child(&child); - errno = EINVAL; - return NULL; - } + close(child->go[1]); - /* child */ - if (child.pid == 0) { - close(child.go[1]); + /* wait for parent's kick */ + err = read(child->go[0], &c, 1); + if (err != 1) + exit(err); - /* wait for parent's kick */ - err = read(child.go[0], &c, 1); - if (err != 1) - exit(err); + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + usdt_trigger(); - uprobe_multi_func_1(); - uprobe_multi_func_2(); - uprobe_multi_func_3(); - usdt_trigger(); + exit(errno); +} - exit(errno); +static int spawn_child_flag(struct child *child, bool clone_vm) +{ + /* pipe to notify child to execute the trigger functions */ + if (pipe(child->go)) + return -1; + + if (clone_vm) { + child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2, + CLONE_VM|SIGCHLD, child); + } else { + child->pid = child->tid = fork(); + } + if (child->pid < 0) { + release_child(child); + errno = EINVAL; + return -1; } - return &child; + /* fork-ed child */ + if (!clone_vm && child->pid == 0) + child_func(child); + + return 0; +} + +static int spawn_child(struct child *child) +{ + return spawn_child_flag(child, false); } static void *child_thread(void *ctx) @@ -130,39 +146,38 @@ static void *child_thread(void *ctx) pthread_exit(&err); } -static struct child *spawn_thread(void) +static int spawn_thread(struct child *child) { - static struct child child; int c, err; /* pipe to notify child to 
execute the trigger functions */ - if (pipe(child.go)) - return NULL; + if (pipe(child->go)) + return -1; /* pipe to notify parent that child thread is ready */ - if (pipe(child.c2p)) { - close(child.go[0]); - close(child.go[1]); - return NULL; + if (pipe(child->c2p)) { + close(child->go[0]); + close(child->go[1]); + return -1; } - child.pid = getpid(); + child->pid = getpid(); - err = pthread_create(&child.thread, NULL, child_thread, &child); + err = pthread_create(&child->thread, NULL, child_thread, child); if (err) { err = -errno; - close(child.go[0]); - close(child.go[1]); - close(child.c2p[0]); - close(child.c2p[1]); + close(child->go[0]); + close(child->go[1]); + close(child->c2p[0]); + close(child->c2p[1]); errno = -err; - return NULL; + return -1; } - err = read(child.c2p[0], &c, 1); + err = read(child->c2p[0], &c, 1); if (!ASSERT_EQ(err, 1, "child_thread_ready")) - return NULL; + return -1; - return &child; + return 0; } static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) @@ -198,7 +213,7 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child /* * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123] - * function and each slepable probe (6) increments uprobe_multi_sleep_result. + * function and each sleepable probe (6) increments uprobe_multi_sleep_result. */ ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); @@ -303,24 +318,22 @@ cleanup: static void test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) { - struct child *child; + static struct child child; /* no pid filter */ __test_attach_api(binary, pattern, opts, NULL); /* pid filter */ - child = spawn_child(); - if (!ASSERT_OK_PTR(child, "spawn_child")) + if (!ASSERT_OK(spawn_child(&child), "spawn_child")) return; - __test_attach_api(binary, pattern, opts, child); + __test_attach_api(binary, pattern, opts, &child); /* pid filter (thread) */ - child = spawn_thread(); - if (!ASSERT_OK_PTR(child, "spawn_thread")) + if (!ASSERT_OK(spawn_thread(&child), "spawn_thread")) return; - __test_attach_api(binary, pattern, opts, child); + __test_attach_api(binary, pattern, opts, &child); } static void test_attach_api_pattern(void) @@ -516,6 +529,122 @@ cleanup: uprobe_multi__destroy(skel); } +#ifdef __x86_64__ +noinline void uprobe_multi_error_func(void) +{ + /* + * If --fcf-protection=branch is enabled the gcc generates endbr as + * first instruction, so marking the exact address of int3 with the + * symbol to be used in the attach_uprobe_fail_trap test below. + */ + asm volatile ( + ".globl uprobe_multi_error_func_int3; \n" + "uprobe_multi_error_func_int3: \n" + "int3 \n" + ); +} + +/* + * Attaching uprobe on uprobe_multi_error_func results in error + * because it already starts with int3 instruction. 
+ */ +static void attach_uprobe_fail_trap(struct uprobe_multi *skel) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[4] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + "uprobe_multi_error_func_int3", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + + skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) { + bpf_link__destroy(skel->links.uprobe); + skel->links.uprobe = NULL; + } +} +#else +static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { } +#endif + +short sema_1 __used, sema_2 __used; + +static void attach_uprobe_fail_refctr(struct uprobe_multi *skel) +{ + unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL; + unsigned long offsets[3], ref_ctr_offsets[3]; + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *path = "/proc/self/exe"; + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + }; + const char *sema[3] = { + "sema_1", + "sema_2", + }; + int prog_fd, link_fd, err; + + prog_fd = bpf_program__fd(skel->progs.uprobe_extra); + + err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms, + &tmp_offsets, STT_FUNC); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func")) + return; + + err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema, + &tmp_ref_ctr_offsets, STT_OBJECT); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema")) + goto cleanup; + + /* + * We attach to 3 uprobes on 2 functions, so 2 uprobes share single function, + * but with different ref_ctr_offset which is not allowed and results in fail. + */ + offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */ + offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */ + offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */ + + ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */ + ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */ + ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */ + + opts.uprobe_multi.path = path; + opts.uprobe_multi.offsets = (const unsigned long *) &offsets; + opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets; + opts.uprobe_multi.cnt = 3; + + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_ERR(link_fd, "link_fd")) + close(link_fd); + +cleanup: + free(tmp_ref_ctr_offsets); + free(tmp_offsets); +} + +static void test_attach_uprobe_fails(void) +{ + struct uprobe_multi *skel = NULL; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + return; + + /* attach fails due to adding uprobe on trap instruction, x86_64 only */ + attach_uprobe_fail_trap(skel); + + /* attach fail due to wrong ref_ctr_offs on one of the uprobes */ + attach_uprobe_fail_refctr(skel); + + uprobe_multi__destroy(skel); +} + static void __test_link_api(struct child *child) { int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; @@ -595,24 +724,296 @@ cleanup: static void test_link_api(void) { - struct child *child; + static struct child child; /* no pid filter */ __test_link_api(NULL); /* pid filter */ - child = spawn_child(); - if (!ASSERT_OK_PTR(child, "spawn_child")) + if (!ASSERT_OK(spawn_child(&child), "spawn_child")) return; - __test_link_api(child); + __test_link_api(&child); /* pid filter (thread) */ - child = spawn_thread(); - if (!ASSERT_OK_PTR(child, "spawn_thread")) + if 
(!ASSERT_OK(spawn_thread(&child), "spawn_thread")) + return; + + __test_link_api(&child); +} + +static struct bpf_program * +get_program(struct uprobe_multi_consumers *skel, int prog) +{ + switch (prog) { + case 0: + return skel->progs.uprobe_0; + case 1: + return skel->progs.uprobe_1; + case 2: + return skel->progs.uprobe_2; + case 3: + return skel->progs.uprobe_3; + default: + ASSERT_FAIL("get_program"); + return NULL; + } +} + +static struct bpf_link ** +get_link(struct uprobe_multi_consumers *skel, int link) +{ + switch (link) { + case 0: + return &skel->links.uprobe_0; + case 1: + return &skel->links.uprobe_1; + case 2: + return &skel->links.uprobe_2; + case 3: + return &skel->links.uprobe_3; + default: + ASSERT_FAIL("get_link"); + return NULL; + } +} + +static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) +{ + struct bpf_program *prog = get_program(skel, idx); + struct bpf_link **link = get_link(skel, idx); + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + + if (!prog || !link) + return -1; + + /* + * bit/prog: 0,1 uprobe entry + * bit/prog: 2,3 uprobe return + */ + opts.retprobe = idx == 2 || idx == 3; + + *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", + "uprobe_consumer_test", + &opts); + if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi")) + return -1; + return 0; +} + +static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx) +{ + struct bpf_link **link = get_link(skel, idx); + + bpf_link__destroy(*link); + *link = NULL; +} + +static bool test_bit(int bit, unsigned long val) +{ + return val & (1 << bit); +} + +noinline int +uprobe_consumer_test(struct uprobe_multi_consumers *skel, + unsigned long before, unsigned long after) +{ + int idx; + + /* detach uprobes that are set in 'before' but not in 'after' ... */ + for (idx = 0; idx < 4; idx++) { + if (test_bit(idx, before) && !test_bit(idx, after)) + uprobe_detach(skel, idx); + } + + /* ... and attach all new programs in 'after' state */ + for (idx = 0; idx < 4; idx++) { + if (!test_bit(idx, before) && test_bit(idx, after)) { + if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after")) + return -1; + } + } + return 0; +} + +static void consumer_test(struct uprobe_multi_consumers *skel, + unsigned long before, unsigned long after) +{ + int err, idx; + + printf("consumer_test before %lu after %lu\n", before, after); + + /* for the 'before' state, attach a uprobe for every set idx */ + for (idx = 0; idx < 4; idx++) { + if (test_bit(idx, before)) { + if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before")) + goto cleanup; + } + } + + err = uprobe_consumer_test(skel, before, after); + if (!ASSERT_EQ(err, 0, "uprobe_consumer_test")) + goto cleanup; + + for (idx = 0; idx < 4; idx++) { + const char *fmt = "BUG"; + __u64 val = 0; + + if (idx < 2) { + /* + * uprobe entry + * +1 if defined in 'before' + */ + if (test_bit(idx, before)) + val++; + fmt = "prog 0/1: uprobe"; + } else { + /* + * uprobe return is tricky ;-) + * + * to trigger uretprobe consumer, the uretprobe needs to be installed, + * which means one of the 'return' uprobes was alive when the probe was hit: + * + * idxs: 2/3 uprobe return in 'installed' mask + * + * in addition if 'after' state removes everything that was installed in + * 'before' state, then the uprobe kernel object goes away and the return uprobe + * is not installed and we won't hit it even if it's in 'after' state. 
+ */ + unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */ + unsigned long probe_preserved = before & after; /* did the uprobe stay installed */ + + if (had_uretprobes && probe_preserved && test_bit(idx, after)) + val++; + fmt = "idx 2/3: uretprobe"; + } + + ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt); + skel->bss->uprobe_result[idx] = 0; + } + +cleanup: + for (idx = 0; idx < 4; idx++) + uprobe_detach(skel, idx); +} + +static void test_consumers(void) +{ + struct uprobe_multi_consumers *skel; + int before, after; + + skel = uprobe_multi_consumers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load")) + return; + + /* + * The idea of this test is to try all possible combinations of + * uprobe consumers attached to a single function. + * + * - 2 uprobe entry consumers + * - 2 uprobe exit consumers + * + * The test uses 4 uprobes attached to a single function, but that + * translates into a single uprobe with 4 consumers in the kernel. + * + * The before/after values represent the state of attached consumers + * before and after the probed function: + * + * bit/prog 0,1 : uprobe entry + * bit/prog 2,3 : uprobe return + * + * For example for: + * + * before = 0b0101 + * after = 0b0110 + * + * it means that before we call 'uprobe_consumer_test' we attach + * uprobes defined in 'before' value: + * + * - bit/prog 0: uprobe entry + * - bit/prog 2: uprobe return + * + * uprobe_consumer_test is called and inside it we attach and detach + * uprobes based on 'after' value: + * + * - bit/prog 0: stays untouched + * - bit/prog 2: uprobe return is detached + * + * uprobe_consumer_test returns and we check the counter values increased + * by bpf programs on each uprobe to match the expected count based on + * before/after bits. 
+ */ + + for (before = 0; before < 16; before++) { + for (after = 0; after < 16; after++) + consumer_test(skel, before, after); + } + + uprobe_multi_consumers__destroy(skel); +} + +static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx) +{ + switch (idx) { + case 0: return skel->progs.uprobe_multi_0; + case 1: return skel->progs.uprobe_multi_1; + case 2: return skel->progs.uprobe_multi_2; + } + return NULL; +} + +#define TASKS 3 + +static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe); + struct bpf_link *link[TASKS] = {}; + struct child child[TASKS] = {}; + int i; + + memset(skel->bss->test, 0, sizeof(skel->bss->test)); + + for (i = 0; i < TASKS; i++) { + if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child")) + goto cleanup; + skel->bss->pids[i] = child[i].pid; + } + + for (i = 0; i < TASKS; i++) { + link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i), + child[i].pid, "/proc/self/exe", + "uprobe_multi_func_1", &opts); + if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi")) + goto cleanup; + } + + for (i = 0; i < TASKS; i++) + kick_child(&child[i]); + + for (i = 0; i < TASKS; i++) { + ASSERT_EQ(skel->bss->test[i][0], 1, "pid"); + ASSERT_EQ(skel->bss->test[i][1], 0, "unknown"); + } + +cleanup: + for (i = 0; i < TASKS; i++) + bpf_link__destroy(link[i]); + for (i = 0; i < TASKS; i++) + release_child(&child[i]); +} + +static void test_pid_filter_process(bool clone_vm) +{ + struct uprobe_multi_pid_filter *skel; + + skel = uprobe_multi_pid_filter__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load")) return; - __test_link_api(child); + run_pid_filter(skel, clone_vm, false); + run_pid_filter(skel, clone_vm, true); + + uprobe_multi_pid_filter__destroy(skel); } static void test_bench_attach_uprobe(void) @@ -703,4 +1104,12 @@ void test_uprobe_multi_test(void) test_bench_attach_usdt(); if (test__start_subtest("attach_api_fails")) test_attach_api_fails(); + if (test__start_subtest("attach_uprobe_fails")) + test_attach_uprobe_fails(); + if (test__start_subtest("consumers")) + test_consumers(); + if (test__start_subtest("filter_fork")) + test_pid_filter_process(false); + if (test__start_subtest("filter_clone_vm")) + test_pid_filter_process(true); } diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index e51721df14fc..d424e7ecbd12 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -4,6 +4,7 @@ #define _GNU_SOURCE #include <linux/compiler.h> #include <linux/ring_buffer.h> +#include <linux/build_bug.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> @@ -642,7 +643,7 @@ static void test_user_ringbuf_blocking_reserve(void) if (!ASSERT_EQ(err, 0, "deferred_kick_thread\n")) goto cleanup; - /* After spawning another thread that asychronously kicks the kernel to + /* After spawning another thread that asynchronously kicks the kernel to * drain the messages, we're able to block and successfully get a * sample once we receive an event notification. 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 9dc3687bc406..e26b5150fc43 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -21,6 +21,7 @@ #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" #include "verifier_cgroup_storage.skel.h" +#include "verifier_const.skel.h" #include "verifier_const_or.skel.h" #include "verifier_ctx.skel.h" #include "verifier_ctx_sk_msg.skel.h" @@ -39,6 +40,7 @@ #include "verifier_int_ptr.skel.h" #include "verifier_iterating_callbacks.skel.h" #include "verifier_jeq_infer_not_null.skel.h" +#include "verifier_jit_convergence.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_ldsx.skel.h" #include "verifier_leak_ptr.skel.h" @@ -53,6 +55,7 @@ #include "verifier_movsx.skel.h" #include "verifier_netfilter_ctx.skel.h" #include "verifier_netfilter_retcode.skel.h" +#include "verifier_bpf_fastcall.skel.h" #include "verifier_or_jmp32_k.skel.h" #include "verifier_precision.skel.h" #include "verifier_prevent_map_lookup.skel.h" @@ -74,6 +77,7 @@ #include "verifier_stack_ptr.skel.h" #include "verifier_subprog_precision.skel.h" #include "verifier_subreg.skel.h" +#include "verifier_tailcall_jit.skel.h" #include "verifier_typedef.skel.h" #include "verifier_uninit.skel.h" #include "verifier_unpriv.skel.h" @@ -84,10 +88,13 @@ #include "verifier_value_or_null.skel.h" #include "verifier_value_ptr_arith.skel.h" #include "verifier_var_off.skel.h" +#include "verifier_vfs_accept.skel.h" +#include "verifier_vfs_reject.skel.h" #include "verifier_xadd.skel.h" #include "verifier_xdp.skel.h" #include "verifier_xdp_direct_packet_access.skel.h" #include "verifier_bits_iter.skel.h" +#include "verifier_lsm.skel.h" #define MAX_ENTRIES 11 @@ -140,6 +147,7 @@ void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } +void test_verifier_const(void) { RUN(verifier_const); } void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx(void) { RUN(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } @@ -158,6 +166,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } +void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_ldsx(void) { RUN(verifier_ldsx); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } @@ -172,6 +181,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_movsx(void) { RUN(verifier_movsx); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } +void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); } void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); } void test_verifier_precision(void) { RUN(verifier_precision); } void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } @@ -193,6 +203,7 @@ 
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subreg(void) { RUN(verifier_subreg); } +void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); } void test_verifier_typedef(void) { RUN(verifier_typedef); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_unpriv(void) { RUN(verifier_unpriv); } @@ -202,10 +213,13 @@ void test_verifier_value(void) { RUN(verifier_value); } void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } +void test_verifier_vfs_accept(void) { RUN(verifier_vfs_accept); } +void test_verifier_vfs_reject(void) { RUN(verifier_vfs_reject); } void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } +void test_verifier_lsm(void) { RUN(verifier_lsm); } static int init_test_val_map(struct bpf_object *obj, char *map_name) { diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index bb0acd79d28a..40dd57fca5cc 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <stdbool.h> +#include <stdatomic.h> #include "bpf_arena_common.h" struct { @@ -77,8 +78,13 @@ int sub(const void *ctx) return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u64 __arena_global and64_value = (0x110ull << 32); +_Atomic __u32 __arena_global and32_value = 0x110; +#else __u64 __arena_global and64_value = (0x110ull << 32); __u32 __arena_global and32_value = 0x110; +#endif SEC("raw_tp/sys_enter") int and(const void *ctx) @@ -86,16 +92,25 @@ int and(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS - +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_and(&and64_value, 0x011ull << 32); __sync_fetch_and_and(&and32_value, 0x011); #endif +#endif return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u32 __arena_global or32_value = 0x110; +_Atomic __u64 __arena_global or64_value = (0x110ull << 32); +#else __u32 __arena_global or32_value = 0x110; __u64 __arena_global or64_value = (0x110ull << 32); +#endif SEC("raw_tp/sys_enter") int or(const void *ctx) @@ -103,15 +118,25 @@ int or(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_or(&or64_value, 0x011ull << 32); __sync_fetch_and_or(&or32_value, 0x011); #endif +#endif return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u64 __arena_global xor64_value = (0x110ull << 32); +_Atomic __u32 __arena_global xor32_value = 0x110; +#else __u64 __arena_global xor64_value = (0x110ull << 32); __u32 
__arena_global xor32_value = 0x110; +#endif SEC("raw_tp/sys_enter") int xor(const void *ctx) @@ -119,9 +144,14 @@ int xor(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_xor(&xor64_value, 0x011ull << 32); __sync_fetch_and_xor(&xor32_value, 0x011); #endif +#endif return 0; } diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c index d665b8a15cc4..f089faa97ae6 100644 --- a/tools/testing/selftests/bpf/progs/bpf_cubic.c +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only -/* WARNING: This implemenation is not necessarily the same +/* WARNING: This implementation is not necessarily the same * as the tcp_cubic.c. The purpose is mainly for testing * the kernel BPF logic. * @@ -314,7 +314,7 @@ static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked) * (so time^3 is done by using 64 bit) * and without the support of division of 64bit numbers * (so all divisions are done by using 32 bit) - * also NOTE the unit of those veriables + * also NOTE the unit of those variables * time = (t - K) / 2^bictcp_HZ * c = bic_scale >> 10 * rtt = (srtt >> 3) / HZ @@ -507,7 +507,7 @@ void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample) __u32 delay; bpf_cubic_acked_called = 1; - /* Some calls are for duplicates without timetamps */ + /* Some calls are for duplicates without timestamps */ if (sample->rtt_us < 0) return; diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c index 02f552e7fd4d..7cd73e75f52a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -26,7 +26,7 @@ static bool before(__u32 seq1, __u32 seq2) char _license[] SEC("license") = "GPL"; -volatile const char fallback[TCP_CA_NAME_MAX]; +volatile const char fallback_cc[TCP_CA_NAME_MAX]; const char bpf_dctcp[] = "bpf_dctcp"; const char tcp_cdg[] = "cdg"; char cc_res[TCP_CA_NAME_MAX]; @@ -71,10 +71,10 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk) struct bpf_dctcp *ca = inet_csk_ca(sk); int *stg; - if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) { + if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) { /* Switch to fallback */ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)) == -EBUSY) + (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY) ebusy_cnt++; /* Switch back to myself and the recurred bpf_dctcp_init() @@ -87,7 +87,7 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk) /* Switch back to fallback */ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)) == -EBUSY) + (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY) ebusy_cnt++; /* Expecting -ENOTSUPP for tcp_cdg_res */ diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 81097a3f15eb..eccaf955e394 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -2,6 +2,9 @@ #ifndef __BPF_MISC_H__ #define __BPF_MISC_H__ +#define XSTR(s) STR(s) +#define STR(s) #s + /* This set of attributes controls behavior of the * test_loader.c:test_loader__run_subtests(). 
* @@ -22,10 +25,49 @@ * * __msg Message expected to be found in the verifier log. * Multiple __msg attributes could be specified. + * To match a regular expression use "{{" "}}" brackets, + * e.g. "foo{{[0-9]+}}" matches strings like "foo007". + * Extended POSIX regular expression syntax is allowed + * inside the brackets. * __msg_unpriv Same as __msg but for unprivileged mode. * - * __regex Same as __msg, but using a regular expression. - * __regex_unpriv Same as __msg_unpriv but using a regular expression. + * __xlated Expect a line in a disassembly log after verifier applies rewrites. + * Multiple __xlated attributes could be specified. + * Regular expressions could be specified same way as in __msg. + * __xlated_unpriv Same as __xlated but for unprivileged mode. + * + * __jited Match a line in a disassembly of the jited BPF program. + * Has to be used after __arch_* macro. + * For example: + * + * __arch_x86_64 + * __jited(" endbr64") + * __jited(" nopl (%rax,%rax)") + * __jited(" xorq %rax, %rax") + * ... + * __naked void some_test(void) + * { + * asm volatile (... ::: __clobber_all); + * } + * + * Regular expressions could be included in patterns same way + * as in __msg. + * + * By default assume that each pattern has to be matched on the + * next consecutive line of disassembly, e.g.: + * + * __jited(" endbr64") # matched on line N + * __jited(" nopl (%rax,%rax)") # matched on line N+1 + * + * If match occurs on a wrong line an error is reported. + * To override this behaviour use literal "...", e.g.: + * + * __jited(" endbr64") # matched on line N + * __jited("...") # not matched + * __jited(" nopl (%rax,%rax)") # matched on any line >= N + * + * __jited_unpriv Same as __jited but for unprivileged mode. + * * * __success Expect program load success in privileged mode. * __success_unpriv Expect program load success in unprivileged mode. @@ -60,14 +102,20 @@ * __auxiliary Annotated program is not a separate test, but used as auxiliary * for some other test cases and should always be loaded. * __auxiliary_unpriv Same, but load program in unprivileged mode. + * + * __arch_* Specify on which architecture the test case should be tested. + * Several __arch_* annotations could be specified at once. + * When test case is not run on current arch it is marked as skipped. 
*/ -#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) -#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) +#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg))) +#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg))) +#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) -#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) -#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) +#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg))) +#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg))) +#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) @@ -77,6 +125,10 @@ #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) +#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch))) +#define __arch_x86_64 __arch("X86_64") +#define __arch_arm64 __arch("ARM64") +#define __arch_riscv64 __arch("RISCV64") /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c index 1a476d8ed354..9e7d9674ce2a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c +++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c @@ -43,9 +43,7 @@ int BPF_KPROBE(handle_sys_prctl) /* test for PT_REGS_PARM */ -#if !defined(bpf_target_arm64) && !defined(bpf_target_s390) bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs)); -#endif arg1 = tmp; bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs)); bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs)); diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h index a0778fe7857a..41d59f0ee606 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h @@ -3,8 +3,6 @@ #ifndef __PROGS_CG_STORAGE_MULTI_H #define __PROGS_CG_STORAGE_MULTI_H -#include <asm/types.h> - struct cgroup_value { __u32 egress_pkts; __u32 ingress_pkts; diff --git a/tools/testing/selftests/bpf/progs/cgroup_ancestor.c b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c new file mode 100644 index 000000000000..8c2deb4fc493 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: 
GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_tracing_net.h" +#define NUM_CGROUP_LEVELS 4 + +__u64 cgroup_ids[NUM_CGROUP_LEVELS]; +__u16 dport; + +static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) +{ + /* [1] &level passed to external function that may change it, it's + * incompatible with loop unroll. + */ + cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level); +} + +SEC("tc") +int log_cgroup_id(struct __sk_buff *skb) +{ + struct sock *sk = (void *)skb->sk; + + if (!sk) + return TC_ACT_OK; + + sk = bpf_core_cast(sk, struct sock); + if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) { + log_nth_level(skb, 0); + log_nth_level(skb, 1); + log_nth_level(skb, 2); + log_nth_level(skb, 3); + } + + return TC_ACT_OK; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c new file mode 100644 index 000000000000..db1e4d2d3281 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u64); +} cgroup_storage SEC(".maps"); + +SEC("cgroup_skb/egress") +int bpf_prog(struct __sk_buff *skb) +{ + __u64 *counter; + + counter = bpf_get_local_storage(&cgroup_storage, 0); + __sync_fetch_and_add(counter, 1); + + /* Drop one out of every two packets */ + return (*counter & 1); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c index 79b54a4fa244..c1dfbd2b56fc 100644 --- a/tools/testing/selftests/bpf/progs/dev_cgroup.c +++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c @@ -41,14 +41,14 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx) bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor); #endif - /* Allow access to /dev/zero and /dev/random. + /* Allow access to /dev/null and /dev/urandom. * Forbid everything else. */ if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR) return 0; switch (ctx->minor) { - case 5: /* 1:5 /dev/zero */ + case 3: /* 1:3 /dev/null */ case 9: /* 1:9 /dev/urandom */ return 1; } diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index c3bc186af21e..8f36c9de7591 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -965,7 +965,7 @@ int dynptr_invalidate_slice_reinit(void *ctx) * mem_or_null pointers. 
*/ SEC("?raw_tp") -__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_") +__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_") int dynptr_invalidate_slice_or_null(void *ctx) { struct bpf_dynptr ptr; @@ -983,7 +983,7 @@ int dynptr_invalidate_slice_or_null(void *ctx) /* Destruction of dynptr should also any slices obtained from it */ SEC("?raw_tp") -__failure __regex("R[0-9]+ invalid mem access 'scalar'") +__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'") int dynptr_invalidate_slice_failure(void *ctx) { struct bpf_dynptr ptr1; @@ -1070,7 +1070,7 @@ int dynptr_read_into_slot(void *ctx) /* bpf_dynptr_slice()s are read-only and cannot be written to */ SEC("?tc") -__failure __regex("R[0-9]+ cannot write into rdonly_mem") +__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem") int skb_invalid_slice_write(struct __sk_buff *skb) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c new file mode 100644 index 000000000000..33d3a57bee90 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: r1 = *(u64 *)(r1 +0)") +__xlated("2: r2 = *(u64 *)(r1 +0)") +__xlated("3: r3 = 0") +__xlated("4: r4 = 1") +__xlated("5: if r2 == 0x0 goto pc+10") +__xlated("6: r0 = 0") +__xlated("7: *(u64 *)(r1 +0) = r3") +/* epilogue */ +__xlated("8: r1 = *(u64 *)(r10 -8)") +__xlated("9: r1 = *(u64 *)(r1 +0)") +__xlated("10: r6 = *(u64 *)(r1 +0)") +__xlated("11: r6 += 10000") +__xlated("12: *(u64 *)(r1 +0) = r6") +__xlated("13: r0 = r6") +__xlated("14: r0 *= 2") +__xlated("15: exit") +/* 2nd part of the main prog after the first exit */ +__xlated("16: *(u64 *)(r1 +0) = r4") +__xlated("17: r0 = 1") +/* Clear the r1 to ensure it does not have + * off-by-1 error and ensure it jumps back to the + * beginning of epilogue which initializes + * the r1 with the ctx ptr. + */ +__xlated("18: r1 = 0") +__xlated("19: gotol pc-12") +SEC("struct_ops/test_epilogue_exit") +__naked int test_epilogue_exit(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r2 = *(u64 *)(r1 +0);" + "r3 = 0;" + "r4 = 1;" + "if r2 == 0 goto +3;" + "r0 = 0;" + "*(u64 *)(r1 + 0) = r3;" + "exit;" + "*(u64 *)(r1 + 0) = r4;" + "r0 = 1;" + "r1 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_exit = { + .test_epilogue = (void *)test_epilogue_exit, +}; + +SEC("syscall") +__retval(20000) +int syscall_epilogue_exit0(void *ctx) +{ + struct st_ops_args args = { .a = 1 }; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(20002) +int syscall_epilogue_exit1(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c new file mode 100644 index 000000000000..7275dd594de0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +static __noinline __used int subprog(struct st_ops_args *args) +{ + args->a += 1; + return args->a; +} + +SEC("struct_ops/test_epilogue_subprog") +int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args) +{ + subprog(args); + return args->a; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); + __array(values, void (void)); +} epilogue_map SEC(".maps") = { + .values = { + [0] = (void *)&test_epilogue_subprog, + } +}; + +SEC("struct_ops/test_epilogue_tailcall") +int test_epilogue_tailcall(unsigned long long *ctx) +{ + bpf_tail_call(ctx, &epilogue_map, 0); + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_tailcall = { + .test_epilogue = (void *)test_epilogue_tailcall, +}; + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_subprog = { + .test_epilogue = (void *)test_epilogue_subprog, +}; + +SEC("syscall") +int syscall_epilogue_tailcall(struct st_ops_args *args) +{ + return bpf_kfunc_st_ops_test_epilogue(args); +} diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h index d66d283d9e59..38529779a236 100644 --- a/tools/testing/selftests/bpf/progs/err.h +++ b/tools/testing/selftests/bpf/progs/err.h @@ -5,6 +5,16 @@ #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) (unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO +#define __STR(x) #x + +#define set_if_not_errno_or_zero(x, y) \ +({ \ + asm volatile ("if %0 s< -4095 goto +1\n" \ + "if %0 s<= 0 goto +1\n" \ + "%0 = " __STR(y) "\n" \ + : "+r"(x)); \ +}) + static inline int IS_ERR_OR_NULL(const void *ptr) { return !ptr || IS_ERR_VALUE((unsigned long)ptr); diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c index 68587b1de34e..30fd504856c7 100644 --- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c @@ -4,34 +4,16 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); - __type(key, __u32); - __type(value, __u64); -} cg_ids SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); - __type(key, __u32); - __type(value, __u32); -} pidmap SEC(".maps"); +__u64 cg_id; +__u64 expected_pid; SEC("tracepoint/syscalls/sys_enter_nanosleep") int trace(void *ctx) { __u32 pid = bpf_get_current_pid_tgid(); - __u32 key = 0, *expected_pid; - __u64 *val; - - expected_pid = bpf_map_lookup_elem(&pidmap, &key); - if (!expected_pid || *expected_pid != pid) - return 0; - val = bpf_map_lookup_elem(&cg_ids, &key); - if (val) - *val = bpf_get_current_cgroup_id(); + if (expected_pid == pid) + cg_id = bpf_get_current_cgroup_id(); return 0; } diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c new file mode 100644 index 000000000000..df1d3db60b1b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_testmod.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include "bpf_experimental.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + 
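One helper added above is easy to miss: the set_if_not_errno_or_zero() macro in err.h is written as inline BPF assembly, presumably so the clamping stays visible to the verifier as explicit conditional jumps (sleepable LSM programs must return values the verifier can prove lie in the allowed range) rather than being folded away by the compiler. A rough C equivalent of what it does, purely for illustration:

	/* keep ret when it is already 0 or a valid -errno (-4095..-1),
	 * otherwise force it to the fallback value
	 */
	if (ret < -4095 || ret > 0)
		ret = -EFAULT;

It is used further down by test_sig_in_xattr.c and test_verify_pkcs7_sig.c as set_if_not_errno_or_zero(ret, -EFAULT) just before returning from an lsm.s program.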
+SEC("raw_tp/sys_enter") +__success +int iter_next_trusted(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task_vma vma_it; + struct vm_area_struct *vma_ptr; + + bpf_iter_task_vma_new(&vma_it, cur_task, 0); + + vma_ptr = bpf_iter_task_vma_next(&vma_it); + if (vma_ptr == NULL) + goto out; + + bpf_kfunc_trusted_vma_test(vma_ptr); +out: + bpf_iter_task_vma_destroy(&vma_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int iter_next_trusted_or_null(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task_vma vma_it; + struct vm_area_struct *vma_ptr; + + bpf_iter_task_vma_new(&vma_it, cur_task, 0); + + vma_ptr = bpf_iter_task_vma_next(&vma_it); + + bpf_kfunc_trusted_vma_test(vma_ptr); + + bpf_iter_task_vma_destroy(&vma_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__success +int iter_next_rcu(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + if (task_ptr == NULL) + goto out; + + bpf_kfunc_rcu_task_test(task_ptr); +out: + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int iter_next_rcu_or_null(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + + bpf_kfunc_rcu_task_test(task_ptr); + + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("R1 must be referenced or trusted") +int iter_next_rcu_not_trusted(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + if (task_ptr == NULL) + goto out; + + bpf_kfunc_trusted_task_test(task_ptr); +out: + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("R1 cannot write into rdonly_mem") +/* Message should not be 'R1 cannot write into rdonly_trusted_mem' */ +int iter_next_ptr_mem_not_trusted(const void *ctx) +{ + struct bpf_iter_num num_it; + int *num_ptr; + + bpf_iter_num_new(&num_it, 0, 10); + + num_ptr = bpf_iter_num_next(&num_it); + if (num_ptr == NULL) + goto out; + + bpf_kfunc_trusted_num_test(num_ptr); +out: + bpf_iter_num_destroy(&num_it); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c index 3873fb6c292a..4a176e6aede8 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -12,6 +12,7 @@ struct bpf_iter_testmod_seq { extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym; extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym; +extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym; extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym; const volatile __s64 exp_empty = 0 + 1; @@ -76,4 +77,53 @@ int testmod_seq_truncated(const void *ctx) return 0; } +SEC("?raw_tp") +__failure +__msg("expected 
an initialized iter_testmod_seq as arg #2") +int testmod_seq_getter_before_bad(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + + return bpf_iter_testmod_seq_value(0, &it); +} + +SEC("?raw_tp") +__failure +__msg("expected an initialized iter_testmod_seq as arg #2") +int testmod_seq_getter_after_bad(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + s64 sum = 0, *v; + + bpf_iter_testmod_seq_new(&it, 100, 100); + + while ((v = bpf_iter_testmod_seq_next(&it))) { + sum += *v; + } + + bpf_iter_testmod_seq_destroy(&it); + + return sum + bpf_iter_testmod_seq_value(0, &it); +} + +SEC("?socket") +__success __retval(1000000) +int testmod_seq_getter_good(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + s64 sum = 0, *v; + + bpf_iter_testmod_seq_new(&it, 100, 100); + + while ((v = bpf_iter_testmod_seq_next(&it))) { + sum += *v; + } + + sum *= bpf_iter_testmod_seq_value(0, &it); + + bpf_iter_testmod_seq_destroy(&it); + + return sum; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 4b0b7b79cdfb..08fae306539c 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -150,4 +150,11 @@ int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb) return ret; } +SEC("?tc") +int kfunc_call_test_pointer_arg_type_mismatch(struct __sk_buff *skb) +{ + bpf_kfunc_call_test_pass_ctx((void *)10); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index 75043ffc5dad..b092a72b2c9d 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -8,9 +8,12 @@ #include "../bpf_experimental.h" #include "../bpf_testmod/bpf_testmod_kfunc.h" +struct plain_local; + struct node_data { long key; long data; + struct plain_local __kptr * stashed_in_local_kptr; struct bpf_rb_node node; }; @@ -85,6 +88,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) static int create_and_stash(int idx, int val) { + struct plain_local *inner_local_kptr; struct map_value *mapval; struct node_data *res; @@ -92,11 +96,25 @@ static int create_and_stash(int idx, int val) if (!mapval) return 1; + inner_local_kptr = bpf_obj_new(typeof(*inner_local_kptr)); + if (!inner_local_kptr) + return 2; + res = bpf_obj_new(typeof(*res)); - if (!res) - return 1; + if (!res) { + bpf_obj_drop(inner_local_kptr); + return 3; + } res->key = val; + inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr); + if (inner_local_kptr) { + /* Should never happen, we just obj_new'd res */ + bpf_obj_drop(inner_local_kptr); + bpf_obj_drop(res); + return 4; + } + res = bpf_kptr_xchg(&mapval->node, res); if (res) bpf_obj_drop(res); @@ -169,6 +187,7 @@ long stash_local_with_root(void *ctx) SEC("tc") long unstash_rb_node(void *ctx) { + struct plain_local *inner_local_kptr = NULL; struct map_value *mapval; struct node_data *res; long retval; @@ -180,6 +199,13 @@ long unstash_rb_node(void *ctx) res = bpf_kptr_xchg(&mapval->node, NULL); if (res) { + inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr); + if (!inner_local_kptr) { + bpf_obj_drop(res); + return 1; + } + bpf_obj_drop(inner_local_kptr); + retval = res->key; bpf_obj_drop(res); return retval; diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c 
b/tools/testing/selftests/bpf/progs/lsm_tailcall.c new file mode 100644 index 000000000000..49c075ce2d4c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Huawei Technologies Co., Ltd */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +SEC("lsm/file_permission") +int lsm_file_permission_prog(void *ctx) +{ + return 0; +} + +SEC("lsm/file_alloc_security") +int lsm_file_alloc_security_prog(void *ctx) +{ + return 0; +} + +SEC("lsm/file_alloc_security") +int lsm_file_alloc_security_entry(void *ctx) +{ + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/mmap_inner_array.c b/tools/testing/selftests/bpf/progs/mmap_inner_array.c new file mode 100644 index 000000000000..90aacbc2938a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mmap_inner_array.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct inner_array_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(map_flags, BPF_F_MMAPABLE); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 1); +} inner_array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); + __array(values, struct inner_array_type); +} outer_map SEC(".maps"); + +int pid = 0; +__u64 match_value = 0x13572468; +bool done = false; +bool pid_match = false; +bool outer_map_match = false; + +SEC("fentry/" SYS_PREFIX "sys_nanosleep") +int add_to_list_in_inner_array(void *ctx) +{ + __u32 curr_pid, zero = 0; + struct bpf_map *map; + __u64 *value; + + curr_pid = (u32)bpf_get_current_pid_tgid(); + if (done || curr_pid != pid) + return 0; + + pid_match = true; + map = bpf_map_lookup_elem(&outer_map, &curr_pid); + if (!map) + return 0; + + outer_map_match = true; + value = bpf_map_lookup_elem(map, &zero); + if (!value) + return 0; + + *value = match_value; + done = true; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c new file mode 100644 index 000000000000..8e521a21d995 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/nested_acquire.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +SEC("tp_btf/tcp_probe") +__success +int BPF_PROG(test_nested_acquire_nonzero, struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff *ptr; + + ptr = bpf_kfunc_nested_acquire_nonzero_offset_test(&sk->sk_write_queue); + + bpf_kfunc_nested_release_test(ptr); + return 0; +} + +SEC("tp_btf/tcp_probe") +__success +int BPF_PROG(test_nested_acquire_zero, struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff *ptr; + + ptr = bpf_kfunc_nested_acquire_zero_offset_test(&sk->__sk_common); + + bpf_kfunc_nested_release_test(ptr); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c 
b/tools/testing/selftests/bpf/progs/pro_epilogue.c new file mode 100644 index 000000000000..44bc3f06b4b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +void __kfunc_btf_root(void) +{ + bpf_kfunc_st_ops_inc10(NULL); +} + +static __noinline __used int subprog(struct st_ops_args *args) +{ + args->a += 1; + return args->a; +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* main prog */ +__xlated("4: r1 = *(u64 *)(r1 +0)") +__xlated("5: r6 = r1") +__xlated("6: call kernel-function") +__xlated("7: r1 = r6") +__xlated("8: call pc+1") +__xlated("9: exit") +SEC("struct_ops/test_prologue") +__naked int test_prologue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: r1 = *(u64 *)(r1 +0)") +__xlated("2: r6 = r1") +__xlated("3: call kernel-function") +__xlated("4: r1 = r6") +__xlated("5: call pc+") +/* epilogue */ +__xlated("6: r1 = *(u64 *)(r10 -8)") +__xlated("7: r1 = *(u64 *)(r1 +0)") +__xlated("8: r6 = *(u64 *)(r1 +0)") +__xlated("9: r6 += 10000") +__xlated("10: *(u64 *)(r1 +0) = r6") +__xlated("11: r0 = r6") +__xlated("12: r0 *= 2") +__xlated("13: exit") +SEC("struct_ops/test_epilogue") +__naked int test_epilogue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* save __u64 *ctx to stack */ +__xlated("4: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("5: r1 = *(u64 *)(r1 +0)") +__xlated("6: r6 = r1") +__xlated("7: call kernel-function") +__xlated("8: r1 = r6") +__xlated("9: call pc+") +/* epilogue */ +__xlated("10: r1 = *(u64 *)(r10 -8)") +__xlated("11: r1 = *(u64 *)(r1 +0)") +__xlated("12: r6 = *(u64 *)(r1 +0)") +__xlated("13: r6 += 10000") +__xlated("14: *(u64 *)(r1 +0) = r6") +__xlated("15: r0 = r6") +__xlated("16: r0 *= 2") +__xlated("17: exit") +SEC("struct_ops/test_pro_epilogue") +__naked int test_pro_epilogue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +SEC("syscall") +__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */ +int syscall_prologue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_prologue(&args); +} + +SEC("syscall") +__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ +int syscall_epilogue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ 
+int syscall_pro_epilogue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_pro_epilogue(&args); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops pro_epilogue = { + .test_prologue = (void *)test_prologue, + .test_epilogue = (void *)test_epilogue, + .test_pro_epilogue = (void *)test_pro_epilogue, +}; diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c new file mode 100644 index 000000000000..3529e53be355 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* main prog */ +__xlated("4: if r1 == 0x0 goto pc+5") +__xlated("5: if r1 == 0x1 goto pc+2") +__xlated("6: r1 = 1") +__xlated("7: goto pc-3") +__xlated("8: r1 = 0") +__xlated("9: goto pc-6") +__xlated("10: r0 = 0") +__xlated("11: exit") +SEC("struct_ops/test_prologue_goto_start") +__naked int test_prologue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: if r1 == 0x0 goto pc+5") +__xlated("2: if r1 == 0x1 goto pc+2") +__xlated("3: r1 = 1") +__xlated("4: goto pc-3") +__xlated("5: r1 = 0") +__xlated("6: goto pc-6") +__xlated("7: r0 = 0") +/* epilogue */ +__xlated("8: r1 = *(u64 *)(r10 -8)") +__xlated("9: r1 = *(u64 *)(r1 +0)") +__xlated("10: r6 = *(u64 *)(r1 +0)") +__xlated("11: r6 += 10000") +__xlated("12: *(u64 *)(r1 +0) = r6") +__xlated("13: r0 = r6") +__xlated("14: r0 *= 2") +__xlated("15: exit") +SEC("struct_ops/test_epilogue_goto_start") +__naked int test_epilogue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* save __u64 *ctx to stack */ +__xlated("4: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("5: if r1 == 0x0 goto pc+5") +__xlated("6: if r1 == 0x1 goto pc+2") +__xlated("7: r1 = 1") +__xlated("8: goto pc-3") +__xlated("9: r1 = 0") +__xlated("10: goto pc-6") +__xlated("11: r0 = 0") +/* epilogue */ +__xlated("12: r1 = *(u64 *)(r10 -8)") +__xlated("13: r1 = *(u64 *)(r1 +0)") +__xlated("14: r6 = *(u64 *)(r1 +0)") +__xlated("15: r6 += 10000") +__xlated("16: *(u64 *)(r1 +0) = r6") +__xlated("17: r0 = r6") +__xlated("18: r0 *= 2") +__xlated("19: exit") +SEC("struct_ops/test_pro_epilogue_goto_start") +__naked int test_pro_epilogue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_goto_start = { + .test_prologue = (void *)test_prologue_goto_start, + .test_epilogue = (void 
*)test_epilogue_goto_start, + .test_pro_epilogue = (void *)test_pro_epilogue_goto_start, +}; + +SEC("syscall") +__retval(0) +int syscall_prologue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_prologue(&args); +} + +SEC("syscall") +__retval(20000) /* (EPILOGUE_A [10000]) * 2 */ +int syscall_epilogue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */ +int syscall_pro_epilogue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_pro_epilogue(&args); +} diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index b722a1e1ddef..dbd5eee8e25e 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c index 986f96687ae1..39ebef430059 100644 --- a/tools/testing/selftests/bpf/progs/read_vsyscall.c +++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2024. Huawei Technologies Co., Ltd */ +#include "vmlinux.h" #include <linux/types.h> #include <bpf/bpf_helpers.h> @@ -7,10 +8,15 @@ int target_pid = 0; void *user_ptr = 0; -int read_ret[8]; +int read_ret[9]; char _license[] SEC("license") = "GPL"; +/* + * This is the only kfunc, the others are helpers + */ +int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym; + SEC("fentry/" SYS_PREFIX "sys_nanosleep") int do_probe_read(void *ctx) { @@ -40,6 +46,7 @@ int do_copy_from_user(void *ctx) read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr); read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr, bpf_get_current_task_btf(), 0); + read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0); return 0; } diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index f8d4b7cfcd68..836c8ab7b908 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx) } SEC("?tc") -__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}") long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) { struct node_acquire *n, *m; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index f74459eead26..a5c74d31a244 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -373,7 +373,7 @@ static __always_inline uint64_t 
read_str_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr); /* * if bpf_probe_read_user_str returns error (<0), due to casting to - * unsinged int, it will become big number, so next check is + * unsigned int, it will become big number, so next check is * sufficient to check for errors AND prove to BPF verifier, that * bpf_probe_read_user_str won't return anything bigger than * STROBE_MAX_STR_LEN @@ -557,7 +557,7 @@ static void *read_strobe_meta(struct task_struct *task, return NULL; payload_off = ctx.payload_off; - /* this should not really happen, here only to satisfy verifer */ + /* this should not really happen, here only to satisfy verifier */ if (payload_off > sizeof(data->payload)) payload_off = sizeof(data->payload); #else diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c index 3d3cafdebe72..0f4dfb770c32 100644 --- a/tools/testing/selftests/bpf/progs/syscall.c +++ b/tools/testing/selftests/bpf/progs/syscall.c @@ -8,6 +8,7 @@ #include <linux/btf.h> #include <string.h> #include <errno.h> +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; @@ -119,7 +120,7 @@ int load_prog(struct args *ctx) static __u64 value = 34; static union bpf_attr prog_load_attr = { .prog_type = BPF_PROG_TYPE_XDP, - .insn_cnt = sizeof(insns) / sizeof(insns[0]), + .insn_cnt = ARRAY_SIZE(insns), }; int ret; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c new file mode 100644 index 000000000000..327ca395e860 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_legacy.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +static __noinline +int subprog_tail(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 0); + return 0; +} + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + int ret = 1; + + count++; + subprog_tail(skb); + subprog_tail(skb); + + return ret; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c new file mode 100644 index 000000000000..72fd0d577506 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int classifier_0(struct __sk_buff *skb); +int classifier_1(struct __sk_buff *skb); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 2); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + [1] = (void *) &classifier_1, + }, +}; + +int count0 = 0; +int count1 = 0; + +static __noinline +int subprog_tail0(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 0); + return 0; +} + +__auxiliary +SEC("tc") +int classifier_0(struct __sk_buff *skb) +{ + count0++; + subprog_tail0(skb); + return 0; +} + +static __noinline +int subprog_tail1(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 1); + return 0; +} + +__auxiliary +SEC("tc") +int 
classifier_1(struct __sk_buff *skb) +{ + count1++; + subprog_tail1(skb); + return 0; +} + +__success +__retval(33) +SEC("tc") +int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb) +{ + int ret = 0; + + subprog_tail0(skb); + subprog_tail1(skb); + + __sink(ret); + return (count1 << 16) | count0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c new file mode 100644 index 000000000000..a7fb91cb05b7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int classifier_0(struct __sk_buff *skb); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table0 SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table1 SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + }, +}; + +int count = 0; + +static __noinline +int subprog_tail(struct __sk_buff *skb, void *jmp_table) +{ + bpf_tail_call_static(skb, jmp_table, 0); + return 0; +} + +__auxiliary +SEC("tc") +int classifier_0(struct __sk_buff *skb) +{ + count++; + subprog_tail(skb, &jmp_table0); + subprog_tail(skb, &jmp_table1); + return count; +} + +__success +__retval(33) +SEC("tc") +int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb) +{ + int ret = 0; + + bpf_tail_call_static(skb, &jmp_table0, 0); + + __sink(ret); + return ret; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c new file mode 100644 index 000000000000..c87f9ca982d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Leon Hwang */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +static __noinline +int subprog_tail(void *ctx) +{ + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +SEC("fentry/dummy") +int BPF_PROG(fentry, struct sk_buff *skb) +{ + count++; + subprog_tail(ctx); + subprog_tail(ctx); + + return 0; +} + + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c new file mode 100644 index 000000000000..6713b809df44 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +SEC("freplace") +int entry_freplace(struct __sk_buff *skb) +{ + count++; + bpf_tail_call_static(skb, &jmp_table, 0); + return count; +} + +char __license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index 70df695312dc..a55149015063 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -5,6 +5,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_experimental.h" #include "task_kfunc_common.h" char _license[] SEC("license") = "GPL"; @@ -142,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone SEC("tp_btf/task_newtask") int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) { - struct task_struct *kptr; - struct __tasks_kfunc_map_value *v; + struct task_struct *kptr, *acquired; + struct __tasks_kfunc_map_value *v, *local; + int refcnt, refcnt_after_drop; long status; if (!is_test_kfunc_task()) @@ -167,6 +169,56 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) return 0; } + local = bpf_obj_new(typeof(*local)); + if (!local) { + err = 4; + bpf_task_release(kptr); + return 0; + } + + kptr = bpf_kptr_xchg(&local->task, kptr); + if (kptr) { + err = 5; + bpf_obj_drop(local); + bpf_task_release(kptr); + return 0; + } + + kptr = bpf_kptr_xchg(&local->task, NULL); + if (!kptr) { + err = 6; + bpf_obj_drop(local); + return 0; + } + + /* Stash a copy into local kptr and check if it is released recursively */ + acquired = bpf_task_acquire(kptr); + if (!acquired) { + err = 7; + bpf_obj_drop(local); + bpf_task_release(kptr); + return 0; + } + bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users); + + acquired = bpf_kptr_xchg(&local->task, acquired); + if (acquired) { + err = 8; + bpf_obj_drop(local); + bpf_task_release(kptr); + bpf_task_release(acquired); + return 0; + } + + bpf_obj_drop(local); + + bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users); + if (refcnt != refcnt_after_drop + 1) { + err = 9; + bpf_task_release(kptr); + return 0; + } + bpf_task_release(kptr); return 0; diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c new file mode 100644 index 000000000000..8a0632c37839 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +__noinline +int subprog(struct __sk_buff *skb) +{ + int ret = 1; + + __sink(ret); + return ret; +} + +SEC("tc") +int entry_tc(struct __sk_buff *skb) +{ + return subprog(skb); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tc_dummy.c b/tools/testing/selftests/bpf/progs/tc_dummy.c new file mode 100644 index 000000000000..69a3d0dc8787 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tc_dummy.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_legacy.h" + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + return 1; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 68466a6ad18c..fb79e6cab932 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -5,8 +5,10 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include <errno.h> #include "bpf_misc.h" +u32 
dynamic_sz = 1; int kprobe2_res = 0; int kretprobe2_res = 0; int uprobe_byname_res = 0; @@ -14,11 +16,15 @@ int uretprobe_byname_res = 0; int uprobe_byname2_res = 0; int uretprobe_byname2_res = 0; int uprobe_byname3_sleepable_res = 0; +int uprobe_byname3_str_sleepable_res = 0; int uprobe_byname3_res = 0; int uretprobe_byname3_sleepable_res = 0; +int uretprobe_byname3_str_sleepable_res = 0; int uretprobe_byname3_res = 0; void *user_ptr = 0; +int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym; + SEC("ksyscall/nanosleep") int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem) { @@ -87,11 +93,61 @@ static __always_inline bool verify_sleepable_user_copy(void) return bpf_strncmp(data, sizeof(data), "test_data") == 0; } +static __always_inline bool verify_sleepable_user_copy_str(void) +{ + int ret; + char data_long[20]; + char data_long_pad[20]; + char data_long_err[20]; + char data_short[4]; + char data_short_pad[4]; + + ret = bpf_copy_from_user_str(data_short, sizeof(data_short), user_ptr, 0); + + if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4) + return false; + + ret = bpf_copy_from_user_str(data_short_pad, sizeof(data_short_pad), user_ptr, BPF_F_PAD_ZEROS); + + if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4) + return false; + + /* Make sure this passes the verifier */ + ret = bpf_copy_from_user_str(data_long, dynamic_sz & sizeof(data_long), user_ptr, 0); + + if (ret != 0) + return false; + + ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 0); + + if (bpf_strncmp(data_long, 10, "test_data\0") != 0 || ret != 10) + return false; + + ret = bpf_copy_from_user_str(data_long_pad, sizeof(data_long_pad), user_ptr, BPF_F_PAD_ZEROS); + + if (bpf_strncmp(data_long_pad, 10, "test_data\0") != 0 || ret != 10 || data_long_pad[19] != '\0') + return false; + + ret = bpf_copy_from_user_str(data_long_err, sizeof(data_long_err), (void *)data_long, BPF_F_PAD_ZEROS); + + if (ret > 0 || data_long_err[19] != '\0') + return false; + + ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 2); + + if (ret != -EINVAL) + return false; + + return true; +} + SEC("uprobe.s//proc/self/exe:trigger_func3") int handle_uprobe_byname3_sleepable(struct pt_regs *ctx) { if (verify_sleepable_user_copy()) uprobe_byname3_sleepable_res = 9; + if (verify_sleepable_user_copy_str()) + uprobe_byname3_str_sleepable_res = 10; return 0; } @@ -102,7 +158,7 @@ int handle_uprobe_byname3_sleepable(struct pt_regs *ctx) SEC("uprobe//proc/self/exe:trigger_func3") int handle_uprobe_byname3(struct pt_regs *ctx) { - uprobe_byname3_res = 10; + uprobe_byname3_res = 11; return 0; } @@ -110,14 +166,16 @@ SEC("uretprobe.s//proc/self/exe:trigger_func3") int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx) { if (verify_sleepable_user_copy()) - uretprobe_byname3_sleepable_res = 11; + uretprobe_byname3_sleepable_res = 12; + if (verify_sleepable_user_copy_str()) + uretprobe_byname3_str_sleepable_res = 13; return 0; } SEC("uretprobe//proc/self/exe:trigger_func3") int handle_uretprobe_byname3(struct pt_regs *ctx) { - uretprobe_byname3_res = 12; + uretprobe_byname3_res = 14; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_build_id.c b/tools/testing/selftests/bpf/progs/test_build_id.c new file mode 100644 index 000000000000..32ce59f9aa27 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_build_id.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +struct bpf_stack_build_id stack_sleepable[128]; +int res_sleepable; + +struct bpf_stack_build_id stack_nofault[128]; +int res_nofault; + +SEC("uprobe.multi/./uprobe_multi:uprobe") +int uprobe_nofault(struct pt_regs *ctx) +{ + res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault), + BPF_F_USER_STACK | BPF_F_USER_BUILD_ID); + + return 0; +} + +SEC("uprobe.multi.s/./uprobe_multi:uprobe") +int uprobe_sleepable(struct pt_regs *ctx) +{ + res_sleepable = bpf_get_stack(ctx, stack_sleepable, sizeof(stack_sleepable), + BPF_F_USER_STACK | BPF_F_USER_BUILD_ID); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index da54c09e9a15..464515b824b9 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -503,7 +503,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header * * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321) * - * clang will substitue a costant for sizeof, which allows the verifier + * clang will substitute a constant for sizeof, which allows the verifier * to track it's value. Based on this, it can figure out the constant * return value, and calling code works while still being "generic" to * IPv4 and IPv6. diff --git a/tools/testing/selftests/bpf/progs/test_core_read_macros.c b/tools/testing/selftests/bpf/progs/test_core_read_macros.c index fd54caa17319..873d85a4739b 100644 --- a/tools/testing/selftests/bpf/progs/test_core_read_macros.c +++ b/tools/testing/selftests/bpf/progs/test_core_read_macros.c @@ -36,7 +36,7 @@ int handler(void *ctx) return 0; /* next pointers for kernel address space have to be initialized from - * BPF side, user-space mmaped addresses are stil user-space addresses + * BPF side, user-space mmaped addresses are still user-space addresses */ k_probe_in.next = &k_probe_in; __builtin_preserve_access_index(({k_core_in.next = &k_core_in;})); diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c index 7eb2a4e5a3e5..66e737720f7c 100644 --- a/tools/testing/selftests/bpf/progs/test_get_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c @@ -2,6 +2,7 @@ /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ #include "vmlinux.h" +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" @@ -9,10 +10,12 @@ char _license[] SEC("license") = "GPL"; __u32 monitored_pid; -__u32 found_xattr; +__u32 found_xattr_from_file; +__u32 found_xattr_from_dentry; static const char expected_value[] = "hello"; -char value[32]; +char value1[32]; +char value2[32]; SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) @@ -25,13 +28,37 @@ int BPF_PROG(test_file_open, struct file *f) if (pid != monitored_pid) return 0; - bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr); + bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr); ret = bpf_get_file_xattr(f, "user.kfuncs", &value_ptr); if (ret != sizeof(expected_value)) return 0; - if (bpf_strncmp(value, ret, expected_value)) + if (bpf_strncmp(value1, ret, expected_value)) return 0; - found_xattr = 1; + found_xattr_from_file = 1; return 0; } + +SEC("lsm.s/inode_getxattr") +int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name) +{ + struct bpf_dynptr value_ptr; + __u32 pid; + int ret; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr); + + ret = bpf_get_dentry_xattr(dentry, "user.kfuncs", &value_ptr); + if (ret != sizeof(expected_value)) + return 0; + if (bpf_strncmp(value2, ret, expected_value)) + return 0; + found_xattr_from_dentry = 1; + + /* return non-zero to fail getxattr from user space */ + return -EINVAL; +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c index b4e089d6981d..201cc000b3f4 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func15.c +++ b/tools/testing/selftests/bpf/progs/test_global_func15.c @@ -44,7 +44,7 @@ __naked int global_func15_tricky_pruning(void) * case we have a valid 1 stored in R0 register, but in * a branch case we assign some random value to R0. 
So if * there is something wrong with precision tracking for R0 at - * program exit, we might erronenously prune branch case, + * program exit, we might erroneously prune branch case, * because R0 in fallthrough case is imprecise (and thus any * value is valid from POV of verifier is_state_equal() logic) */ diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c index 1fbb73d3e5d5..a3f220ba7025 100644 --- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -3,6 +3,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> char _license[] SEC("license") = "GPL"; @@ -15,7 +16,7 @@ const volatile size_t data_array_len; int sum = 0; int array[1]; -/* custom data secton */ +/* custom data section */ int my_array[1] SEC(".data.custom"); /* custom data section which should NOT be resizable, @@ -60,3 +61,18 @@ int data_array_sum(void *ctx) return 0; } + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + return 0; +} + +struct bpf_testmod_ops { + int (*test_1)(void); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops st_ops_resize = { + .test_1 = (void *)test_1 +}; diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c index f5ac5f3e8919..568816307f71 100644 --- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c +++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c @@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode) if (fmode & FMODE_WRITE) return -EACCES; + barrier(); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c index fc8e8a34a3db..7035fb4d4165 100644 --- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c +++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c @@ -4,6 +4,7 @@ #include <linux/ptrace.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" const struct { unsigned a[4]; @@ -64,7 +65,7 @@ int full_loop(struct pt_regs *ctx) { /* prevent compiler to optimize everything out */ unsigned * volatile p = (void *)&rdonly_values.a; - int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]); + int i = ARRAY_SIZE(rdonly_values.a); unsigned iters = 0, sum = 0; /* validate verifier can allow full loop as well */ diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c index 2f0eb1334d65..8ef6b39335b6 100644 --- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c +++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c @@ -6,6 +6,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" +#include "err.h" char _license[] SEC("license") = "GPL"; @@ -79,5 +80,8 @@ int BPF_PROG(test_file_open, struct file *f) ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring); bpf_key_put(trusted_keyring); + + set_if_not_errno_or_zero(ret, -EFAULT); + return ret; } diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c deleted file mode 100644 index 37aacc66cd68..000000000000 --- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2018 Facebook - -#include <linux/bpf.h> 
-#include <linux/pkt_cls.h> - -#include <string.h> - -#include <bpf/bpf_helpers.h> - -#define NUM_CGROUP_LEVELS 4 - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, __u32); - __type(value, __u64); - __uint(max_entries, NUM_CGROUP_LEVELS); -} cgroup_ids SEC(".maps"); - -static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) -{ - __u64 id; - - /* [1] &level passed to external function that may change it, it's - * incompatible with loop unroll. - */ - id = bpf_skb_ancestor_cgroup_id(skb, level); - bpf_map_update_elem(&cgroup_ids, &level, &id, 0); -} - -SEC("cgroup_id_logger") -int log_cgroup_id(struct __sk_buff *skb) -{ - /* Loop unroll can't be used here due to [1]. Unrolling manually. - * Number of calls should be in sync with NUM_CGROUP_LEVELS. - */ - log_nth_level(skb, 0); - log_nth_level(skb, 1); - log_nth_level(skb, 2); - log_nth_level(skb, 3); - - return TC_ACT_OK; -} - -char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 3f5abcf3ff13..32127f1cd687 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -26,6 +26,18 @@ */ #define ASSIGNED_ADDR_VETH1 0xac1001c8 +struct bpf_fou_encap___local { + __be16 sport; + __be16 dport; +} __attribute__((preserve_access_index)); + +enum bpf_fou_encap_type___local { + FOU_BPF_ENCAP_FOU___local, + FOU_BPF_ENCAP_GUE___local, +}; + +struct bpf_fou_encap; + int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx, struct bpf_fou_encap *encap, int type) __ksym; int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, @@ -745,7 +757,7 @@ SEC("tc") int ipip_gue_set_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; void *data_end = (void *)(long)skb->data_end; @@ -769,7 +781,9 @@ int ipip_gue_set_tunnel(struct __sk_buff *skb) encap.sport = 0; encap.dport = bpf_htons(5555); - ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE); + ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap, + bpf_core_enum_value(enum bpf_fou_encap_type___local, + FOU_BPF_ENCAP_GUE___local)); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; @@ -782,7 +796,7 @@ SEC("tc") int ipip_fou_set_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; void *data_end = (void *)(long)skb->data_end; @@ -806,7 +820,8 @@ int ipip_fou_set_tunnel(struct __sk_buff *skb) encap.sport = 0; encap.dport = bpf_htons(5555); - ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU); + ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap, + FOU_BPF_ENCAP_FOU___local); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; @@ -820,7 +835,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb) { int ret; struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); if (ret < 0) { @@ -828,7 +843,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb) return TC_ACT_SHOT; } - ret = bpf_skb_get_fou_encap(skb, &encap); + ret = bpf_skb_get_fou_encap(skb, (struct bpf_fou_encap *)&encap); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; diff --git 
a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c index f42e9f3831a1..12034a73ee2d 100644 --- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -11,6 +11,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" +#include "err.h" #define MAX_DATA_SIZE (1024 * 1024) #define MAX_SIG_SIZE 1024 @@ -55,12 +56,12 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value); if (ret) - return ret; + goto out; ret = bpf_copy_from_user(data_val, sizeof(struct data), (void *)(unsigned long)value); if (ret) - return ret; + goto out; if (data_val->data_len > sizeof(data_val->data)) return -EINVAL; @@ -84,5 +85,8 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) bpf_key_put(trusted_keyring); +out: + set_if_not_errno_or_zero(ret, -EFAULT); + return ret; } diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c index e4d59b6ba743..a6002d073b1b 100644 --- a/tools/testing/selftests/bpf/progs/token_lsm.c +++ b/tools/testing/selftests/bpf/progs/token_lsm.c @@ -8,8 +8,8 @@ char _license[] SEC("license") = "GPL"; int my_pid; -bool reject_capable; -bool reject_cmd; +int reject_capable; +int reject_cmd; SEC("lsm/bpf_token_capable") int BPF_PROG(token_capable, struct bpf_token *token, int cap) diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 2619ed193c65..044a6d78923e 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -32,6 +32,13 @@ int bench_trigger_uprobe(void *ctx) return 0; } +SEC("?uprobe.multi") +int bench_trigger_uprobe_multi(void *ctx) +{ + inc_counter(); + return 0; +} + const volatile int batch_iters = 0; SEC("?raw_tp") diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c new file mode 100644 index 000000000000..9180365a3568 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/unsupported_ops") +__failure +__msg("attach to unsupported member unsupported_ops of struct bpf_testmod_ops") +int BPF_PROG(unsupported_ops) +{ + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod = { + .unsupported_ops = (void *)unsupported_ops, +}; diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c new file mode 100644 index 000000000000..7e0fdcbbd242 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <stdbool.h> +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_result[4]; + +SEC("uprobe.multi") +int uprobe_0(struct pt_regs *ctx) +{ + uprobe_result[0]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_1(struct pt_regs *ctx) +{ + uprobe_result[1]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_2(struct pt_regs *ctx) +{ + uprobe_result[2]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_3(struct pt_regs *ctx) +{ + uprobe_result[3]++; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c new file mode 100644 index 000000000000..67fcbad36661 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +__u32 pids[3]; +__u32 test[3][2]; + +static void update_pid(int idx) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + + if (pid == pids[idx]) + test[idx][0]++; + else + test[idx][1]++; +} + +SEC("uprobe.multi") +int uprobe_multi_0(struct pt_regs *ctx) +{ + update_pid(0); + return 0; +} + +SEC("uprobe.multi") +int uprobe_multi_1(struct pt_regs *ctx) +{ + update_pid(1); + return 0; +} + +SEC("uprobe.multi") +int uprobe_multi_2(struct pt_regs *ctx) +{ + update_pid(2); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index 716113c2bce2..f4da4d508ddb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -87,7 +87,7 @@ int bits_memalloc(void) int *bit; __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */ - bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64)) + bpf_for_each(bits, bit, &data[0], ARRAY_SIZE(data)) nr++; return nr; } diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c new file mode 100644 index 000000000000..9da97d2efcd9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" +#include <stdbool.h> +#include "bpf_kfuncs.h" + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 8") +__xlated("4: r5 = 5") +__xlated("5: w0 = ") +__xlated("6: r0 = &(void __percpu *)(r0)") 
+__xlated("7: r0 = *(u32 *)(r0 +0)") +__xlated("8: exit") +__success +__naked void simple(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + "r3 = 3;" + "r4 = 4;" + "r5 = 5;" + "*(u64 *)(r10 - 16) = r1;" + "*(u64 *)(r10 - 24) = r2;" + "*(u64 *)(r10 - 32) = r3;" + "*(u64 *)(r10 - 40) = r4;" + "*(u64 *)(r10 - 48) = r5;" + "call %[bpf_get_smp_processor_id];" + "r5 = *(u64 *)(r10 - 48);" + "r4 = *(u64 *)(r10 - 40);" + "r3 = *(u64 *)(r10 - 32);" + "r2 = *(u64 *)(r10 - 24);" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +/* The logic for detecting and verifying bpf_fastcall pattern is the same for + * any arch, however x86 differs from arm64 or riscv64 in a way + * bpf_get_smp_processor_id is rewritten: + * - on x86 it is done by verifier + * - on arm64 and riscv64 it is done by jit + * + * Which leads to different xlated patterns for different archs: + * - on x86 the call is expanded as 3 instructions + * - on arm64 and riscv64 the call remains as is + * (but spills/fills are still removed) + * + * It is really desirable to check instruction indexes in the xlated + * patterns, so add this canary test to check that function rewrite by + * jit is correctly processed by bpf_fastcall logic, keep the rest of the + * tests as x86. + */ +SEC("raw_tp") +__arch_arm64 +__arch_riscv64 +__xlated("0: r1 = 1") +__xlated("1: call bpf_get_smp_processor_id") +__xlated("2: exit") +__success +__naked void canary_arm64_riscv64(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("3: exit") +__success +__naked void canary_zero_spills(void) +{ + asm volatile ( + "call %[bpf_get_smp_processor_id];" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 16") +__xlated("1: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r2 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern1(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r6") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r6 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern2(void) +{ + asm volatile ( + "r6 = 1;" + "*(u64 *)(r10 - 16) = r6;" + "call %[bpf_get_smp_processor_id];" + "r6 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r0") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r0 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern3(void) +{ + asm volatile ( + "r0 = 1;" + "*(u64 *)(r10 - 16) = r0;" + "call %[bpf_get_smp_processor_id];" + "r0 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u64 *)(r2 -16) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u64 *)(r10 -16)") +__success 
+__naked void wrong_base_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = r10;" + "*(u64 *)(r2 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r2 = 1") +__success +__naked void wrong_insn_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r2 = 1;" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u64 *)(r10 -8)") +__success +__naked void wrong_off_in_pattern1(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u32 *)(r10 -4) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u32 *)(r10 -4)") +__success +__naked void wrong_off_in_pattern2(void) +{ + asm volatile ( + "r1 = 1;" + "*(u32 *)(r10 - 4) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u32 *)(r10 - 4);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u32 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u32 *)(r10 -16)") +__success +__naked void wrong_size_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "*(u32 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u32 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u32 *)(r10 -8) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u32 *)(r10 -8)") +__success +__naked void partial_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + "*(u32 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "r1 = *(u32 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("0: r1 = 1") +__xlated("1: r2 = 2") +/* not patched, spills for -8, -16 not removed */ +__xlated("2: *(u64 *)(r10 -8) = r1") +__xlated("3: *(u64 *)(r10 -16) = r2") +__xlated("...") +__xlated("5: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("7: r2 = *(u64 *)(r10 -16)") +__xlated("8: r1 = *(u64 *)(r10 -8)") +/* patched, spills for -24, -32 removed */ +__xlated("...") +__xlated("10: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("12: exit") +__success +__naked void min_stack_offset(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + /* this call won't be patched */ + "*(u64 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "r1 = *(u64 *)(r10 - 8);" + /* this call would be patched */ + "*(u64 *)(r10 - 24) = r1;" + "*(u64 *)(r10 - 32) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 32);" + "r1 = *(u64 *)(r10 - 24);" + "exit;" + : + : 
__imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_fixed_read(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r1 = *(u64 *)(r1 - 0);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_fixed_write(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "*(u64 *)(r1 - 0) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: r1 = *(u64 *)(r10 -16)") +__success +__naked void bad_varying_read(void) +{ + asm volatile ( + "r6 = *(u64 *)(r1 + 0);" /* random scalar value */ + "r6 &= 0x7;" /* r6 range [0..7] */ + "r6 += 0x2;" /* r6 range [2..9] */ + "r7 = 0;" + "r7 -= r6;" /* r7 range [-9..-2] */ + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r1 = r10;" + "r1 += r7;" + "r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */ + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: r1 = *(u64 *)(r10 -16)") +__success +__naked void bad_varying_write(void) +{ + asm volatile ( + "r6 = *(u64 *)(r1 + 0);" /* random scalar value */ + "r6 &= 0x7;" /* r6 range [0..7] */ + "r6 += 0x2;" /* r6 range [2..9] */ + "r7 = 0;" + "r7 -= r6;" /* r7 range [-9..-2] */ + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r1 = r10;" + "r1 += r7;" + "*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */ + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_write_in_subprog(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "call bad_write_in_subprog_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void bad_write_in_subprog_aux(void) +{ + asm volatile ( + "r0 = 1;" + "*(u64 *)(r1 - 0) = r0;" /* invalidates bpf_fastcall contract for caller: */ + "exit;" /* caller stack at -8 used outside of the pattern */ + ::: __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_helper_write(void) +{ + asm volatile ( + "r1 = 1;" + /* bpf_fastcall pattern with stack offset -8 */ + 
"*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r2 = 1;" + "r3 = 42;" + /* read dst is fp[-8], thus bpf_fastcall rewrite not applied */ + "call %[bpf_probe_read_kernel];" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +/* main, not patched */ +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__xlated("...") +__xlated("9: call pc+1") +__xlated("...") +__xlated("10: exit") +/* subprogram, patched */ +__xlated("11: r1 = 1") +__xlated("...") +__xlated("13: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("15: exit") +__success +__naked void invalidate_one_subprog(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r1 = *(u64 *)(r1 - 0);" + "call invalidate_one_subprog_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void invalidate_one_subprog_aux(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +/* main */ +__xlated("0: r1 = 1") +__xlated("...") +__xlated("2: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("4: call pc+1") +__xlated("5: exit") +/* subprogram */ +__xlated("6: r1 = 1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: *(u64 *)(r10 -16) = r1") +__xlated("11: exit") +__success +__naked void subprogs_use_independent_offsets(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "call subprogs_use_independent_offsets_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void subprogs_use_independent_offsets_aux(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 24) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 24);" + "*(u64 *)(r10 - 16) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 8") +__xlated("2: r0 = &(void __percpu *)(r0)") +__success +__naked void helper_call_does_not_prevent_bpf_fastcall(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 16") +/* may_goto counter at -16 */ +__xlated("0: *(u64 *)(r10 -16) =") +__xlated("1: r1 = 1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +/* may_goto expansion starts */ +__xlated("5: r11 = *(u64 *)(r10 -16)") +__xlated("6: if r11 == 0x0 goto pc+3") +__xlated("7: r11 -= 1") +__xlated("8: *(u64 *)(r10 -16) = r11") +/* may_goto expansion ends */ +__xlated("9: *(u64 *)(r10 -8) = r1") +__xlated("10: exit") +__success +__naked void may_goto_interaction(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call 
%[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + ".8byte %[may_goto];" + /* just touch some stack at -8 */ + "*(u64 *)(r10 - 8) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0)) + : __clobber_all); +} + +__used +__naked static void dummy_loop_callback(void) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 32+0") +__xlated("2: r1 = 1") +__xlated("3: w0 =") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("5: r0 = *(u32 *)(r0 +0)") +/* bpf_loop params setup */ +__xlated("6: r2 =") +__xlated("7: r3 = 0") +__xlated("8: r4 = 0") +__xlated("...") +/* ... part of the inlined bpf_loop */ +__xlated("12: *(u64 *)(r10 -32) = r6") +__xlated("13: *(u64 *)(r10 -24) = r7") +__xlated("14: *(u64 *)(r10 -16) = r8") +__xlated("...") +__xlated("21: call pc+8") /* dummy_loop_callback */ +/* ... last insns of the bpf_loop_interaction1 */ +__xlated("...") +__xlated("28: r0 = 0") +__xlated("29: exit") +/* dummy_loop_callback */ +__xlated("30: r0 = 0") +__xlated("31: exit") +__success +__naked int bpf_loop_interaction1(void) +{ + asm volatile ( + "r1 = 1;" + /* bpf_fastcall stack region at -16, but could be removed */ + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r2 = %[dummy_loop_callback];" + "r3 = 0;" + "r4 = 0;" + "call %[bpf_loop];" + "r0 = 0;" + "exit;" + : + : __imm_ptr(dummy_loop_callback), + __imm(bpf_get_smp_processor_id), + __imm(bpf_loop) + : __clobber_common + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 40+0") +/* call bpf_get_smp_processor_id */ +__xlated("2: r1 = 42") +__xlated("3: w0 =") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("5: r0 = *(u32 *)(r0 +0)") +/* call bpf_get_prandom_u32 */ +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("7: call") +__xlated("8: r1 = *(u64 *)(r10 -16)") +__xlated("...") +/* ... part of the inlined bpf_loop */ +__xlated("15: *(u64 *)(r10 -40) = r6") +__xlated("16: *(u64 *)(r10 -32) = r7") +__xlated("17: *(u64 *)(r10 -24) = r8") +__success +__naked int bpf_loop_interaction2(void) +{ + asm volatile ( + "r1 = 42;" + /* bpf_fastcall stack region at -16, cannot be removed */ + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - 16);" + "r2 = %[dummy_loop_callback];" + "r3 = 0;" + "r4 = 0;" + "call %[bpf_loop];" + "r0 = 0;" + "exit;" + : + : __imm_ptr(dummy_loop_callback), + __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32), + __imm(bpf_loop) + : __clobber_common + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 512+0") +/* just to print xlated version when debugging */ +__xlated("r0 = &(void __percpu *)(r0)") +__success +/* cumulative_stack_depth() stack usage is MAX_BPF_STACK, + * called subprogram uses an additional slot for bpf_fastcall spill/fill, + * since bpf_fastcall spill/fill could be removed the program still fits + * in MAX_BPF_STACK and should be accepted. 
+ */ +__naked int cumulative_stack_depth(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "call cumulative_stack_depth_subprog;" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK) + : __clobber_all + ); +} + +__used +__naked static void cumulative_stack_depth_subprog(void) +{ + asm volatile ( + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + :: __imm(bpf_get_smp_processor_id) : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 512") +__xlated("0: r1 = 42") +__xlated("1: *(u64 *)(r10 -512) = r1") +__xlated("2: w0 = ") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("4: r0 = *(u32 *)(r0 +0)") +__xlated("5: exit") +__success +__naked int bpf_fastcall_max_stack_ok(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK), + __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8), + __imm(bpf_get_smp_processor_id) + : __clobber_all + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 520") +__failure +__naked int bpf_fastcall_max_stack_fail(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + /* call to prandom blocks bpf_fastcall rewrite */ + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK), + __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8), + __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +SEC("cgroup/getsockname_unix") +__xlated("0: r2 = 1") +/* bpf_cast_to_kern_ctx is replaced by a single assignment */ +__xlated("1: r0 = r1") +__xlated("2: r0 = r2") +__xlated("3: exit") +__success +__naked void kfunc_bpf_cast_to_kern_ctx(void) +{ + asm volatile ( + "r2 = 1;" + "*(u64 *)(r10 - 32) = r2;" + "call %[bpf_cast_to_kern_ctx];" + "r2 = *(u64 *)(r10 - 32);" + "r0 = r2;" + "exit;" + : + : __imm(bpf_cast_to_kern_ctx) + : __clobber_all); +} + +SEC("raw_tp") +__xlated("3: r3 = 1") +/* bpf_rdonly_cast is replaced by a single assignment */ +__xlated("4: r0 = r1") +__xlated("5: r0 = r3") +void kfunc_bpf_rdonly_cast(void) +{ + asm volatile ( + "r2 = %[btf_id];" + "r3 = 1;" + "*(u64 *)(r10 - 32) = r3;" + "call %[bpf_rdonly_cast];" + "r3 = *(u64 *)(r10 - 32);" + "r0 = r3;" + : + : __imm(bpf_rdonly_cast), + [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr)) + : __clobber_common); +} + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. 
+ */ +void kfunc_root(void) +{ + bpf_cast_to_kern_ctx(0); + bpf_rdonly_cast(0, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c new file mode 100644 index 000000000000..2e533d7eec2f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_const.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Isovalent */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +const volatile long foo = 42; +long bar; +long bart = 96; + +SEC("tc/ingress") +__description("rodata/strtol: write rejected") +__failure __msg("write into map forbidden") +int tcx1(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, (long *)&foo); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("bss/strtol: write accepted") +__success +int tcx2(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, &bar); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("data/strtol: write accepted") +__success +int tcx3(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, &bart); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("rodata/mtu: write rejected") +__failure __msg("write into map forbidden") +int tcx4(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&foo, 0, 0); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("bss/mtu: write accepted") +__success +int tcx5(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bar, 0, 0); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("data/mtu: write accepted") +__success +int tcx6(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bart, 0, 0); + return TCX_PASS; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c index a9fc30ed4d73..20904cd2baa2 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c @@ -7,6 +7,7 @@ #include "bpf_misc.h" #include "xdp_metadata.h" #include "bpf_kfuncs.h" +#include "err.h" /* The compiler may be able to detect the access to uninitialized memory in the routines performing out of bound memory accesses and @@ -331,7 +332,11 @@ SEC("?lsm/bpf") __success __log_level(2) int BPF_PROG(arg_tag_ctx_lsm) { - return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx); + int ret; + + ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx); + set_if_not_errno_or_zero(ret, -1); + return ret; } SEC("?struct_ops/test_1") diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c index 9fc3fae5cd83..5f2efb895edb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -6,9 +6,8 @@ #include "bpf_misc.h" SEC("socket") -__description("ARG_PTR_TO_LONG uninitialized") +__description("arg pointer to long uninitialized") __success -__failure_unpriv __msg_unpriv("invalid indirect read from stack R4 off -16+0 size 8") __naked void arg_ptr_to_long_uninitialized(void) { asm volatile (" \ @@ -35,10 +34,8 @@ __naked void arg_ptr_to_long_uninitialized(void) } SEC("socket") -__description("ARG_PTR_TO_LONG half-uninitialized") -/* in privileged 
mode reads from uninitialized stack locations are permitted */ -__success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8") +__description("arg pointer to long half-uninitialized") +__success __retval(0) __naked void ptr_to_long_half_uninitialized(void) { @@ -67,7 +64,7 @@ __naked void ptr_to_long_half_uninitialized(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG misaligned") +__description("arg pointer to long misaligned") __failure __msg("misaligned stack access off 0+-20+0 size 8") __naked void arg_ptr_to_long_misaligned(void) { @@ -98,7 +95,7 @@ __naked void arg_ptr_to_long_misaligned(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG size < sizeof(long)") +__description("arg pointer to long size < sizeof(long)") __failure __msg("invalid indirect access to stack R4 off=-4 size=8") __naked void to_long_size_sizeof_long(void) { @@ -127,7 +124,7 @@ __naked void to_long_size_sizeof_long(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG initialized") +__description("arg pointer to long initialized") __success __naked void arg_ptr_to_long_initialized(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c new file mode 100644 index 000000000000..9f3f2b7db450 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct value_t { + long long a[32]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct value_t); +} map_hash SEC(".maps"); + +SEC("socket") +__description("bpf_jit_convergence je <-> jmp") +__success __retval(0) +__arch_x86_64 +__jited(" pushq %rbp") +__naked void btf_jit_convergence_je_jmp(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "if r0 == 0 goto l20_%=;" + "if r0 == 1 goto l21_%=;" + "if r0 == 2 goto l22_%=;" + "if r0 == 3 goto l23_%=;" + "if r0 == 4 goto l24_%=;" + "call %[bpf_get_prandom_u32];" + "call %[bpf_get_prandom_u32];" +"l20_%=:" +"l21_%=:" +"l22_%=:" +"l23_%=:" +"l24_%=:" + "r1 = 0;" + "*(u64 *)(r10 - 8) = r1;" + "r2 = r10;" + "r2 += -8;" + "r1 = %[map_hash] ll;" + "call %[bpf_map_lookup_elem];" + "if r0 == 0 goto l1_%=;" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r5 = r6;" + "if r0 != 0x0 goto l12_%=;" + "call %[bpf_get_prandom_u32];" + "r1 = r0;" + "r2 = r6;" + "if r1 == 0x0 goto l0_%=;" +"l9_%=:" + "r2 = *(u64 *)(r6 + 0x0);" + "r2 += 0x1;" + "*(u64 *)(r6 + 0x0) = r2;" + "goto l1_%=;" +"l12_%=:" + "r1 = r7;" + "r1 += 0x98;" + "r2 = r5;" + "r2 += 0x90;" + "r2 = *(u32 *)(r2 + 0x0);" + "r3 = r7;" + "r3 &= 0x1;" + "r2 *= 0xa8;" + "if r3 == 0x0 goto l2_%=;" + "r1 += r2;" + "r1 -= r7;" + "r1 += 0x8;" + "if r1 <= 0xb20 goto l3_%=;" + "r1 = 0x0;" + "goto l4_%=;" +"l3_%=:" + "r1 += r7;" +"l4_%=:" + "if r1 == 0x0 goto l8_%=;" + "goto l9_%=;" +"l2_%=:" + "r1 += r2;" + "r1 -= r7;" + "r1 += 0x10;" + "if r1 <= 0xb20 goto l6_%=;" + "r1 = 0x0;" + "goto l7_%=;" +"l6_%=:" + "r1 += r7;" +"l7_%=:" + "if r1 == 0x0 goto l8_%=;" + "goto l9_%=;" +"l0_%=:" + "r1 = 0x3;" + "*(u64 *)(r10 - 0x10) = r1;" + "r2 = r1;" + "goto l1_%=;" +"l8_%=:" + "r1 = r5;" + "r1 += 0x4;" + "r1 = *(u32 *)(r1 + 0x0);" + "*(u64 *)(r10 - 0x8) = r1;" +"l1_%=:" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + 
__imm(bpf_map_lookup_elem), + __imm_addr(map_hash) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c index cb32b0cfc84b..a509cad97e69 100644 --- a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c +++ b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c @@ -47,6 +47,22 @@ int BPF_PROG(task_kfunc_syscall) return 0; } +SEC("tracepoint") +__success +int BPF_PROG(task_kfunc_tracepoint) +{ + task_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(task_kfunc_perf_event) +{ + task_kfunc_load_test(); + return 0; +} + /***************** * cgroup kfuncs * *****************/ @@ -85,6 +101,22 @@ int BPF_PROG(cgrp_kfunc_syscall) return 0; } +SEC("tracepoint") +__success +int BPF_PROG(cgrp_kfunc_tracepoint) +{ + cgrp_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(cgrp_kfunc_perf_event) +{ + cgrp_kfunc_load_test(); + return 0; +} + /****************** * cpumask kfuncs * ******************/ @@ -120,3 +152,19 @@ int BPF_PROG(cpumask_kfunc_syscall) cpumask_kfunc_load_test(); return 0; } + +SEC("tracepoint") +__success +int BPF_PROG(cpumask_kfunc_tracepoint) +{ + cpumask_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(cpumask_kfunc_perf_event) +{ + cpumask_kfunc_load_test(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c index d4427d8e1217..52edee41caf6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c @@ -144,6 +144,118 @@ __naked void ldsx_s32_range(void) : __clobber_all); } +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_1(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)) + : __clobber_all); +} + +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data_end") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_2(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data_end]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data_meta") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_3(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_4(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data_end") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_5(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data_meta") +__failure __msg("invalid bpf_context access") +__naked void 
ldsx_ctx_6(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta)) + : __clobber_all); +} + +SEC("flow_dissector") +__description("LDSX, flow_dissector s32 __sk_buff->data") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_7(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("flow_dissector") +__description("LDSX, flow_dissector s32 __sk_buff->data_end") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_8(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + #else SEC("socket") diff --git a/tools/testing/selftests/bpf/progs/verifier_lsm.c b/tools/testing/selftests/bpf/progs/verifier_lsm.c new file mode 100644 index 000000000000..32e5e779cb96 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_lsm.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +SEC("lsm/file_alloc_security") +__description("lsm bpf prog with -4095~0 retval. test 1") +__success +__naked int errno_zero_retval_test1(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_alloc_security") +__description("lsm bpf prog with -4095~0 retval. test 2") +__success +__naked int errno_zero_retval_test2(void *ctx) +{ + asm volatile ( + "r0 = -4095;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_mprotect") +__description("lsm bpf prog with -4095~0 retval. test 4") +__failure __msg("R0 has smin=-4096 smax=-4096 should have been in [-4095, 0]") +__naked int errno_zero_retval_test4(void *ctx) +{ + asm volatile ( + "r0 = -4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_mprotect") +__description("lsm bpf prog with -4095~0 retval. test 5") +__failure __msg("R0 has smin=4096 smax=4096 should have been in [-4095, 0]") +__naked int errno_zero_retval_test5(void *ctx) +{ + asm volatile ( + "r0 = 4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_mprotect") +__description("lsm bpf prog with -4095~0 retval. test 6") +__failure __msg("R0 has smin=1 smax=1 should have been in [-4095, 0]") +__naked int errno_zero_retval_test6(void *ctx) +{ + asm volatile ( + "r0 = 1;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/audit_rule_known") +__description("lsm bpf prog with bool retval. test 1") +__success +__naked int bool_retval_test1(void *ctx) +{ + asm volatile ( + "r0 = 1;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/audit_rule_known") +__description("lsm bpf prog with bool retval. test 2") +__success +__success +__naked int bool_retval_test2(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/audit_rule_known") +__description("lsm bpf prog with bool retval. test 3") +__failure __msg("R0 has smin=-1 smax=-1 should have been in [0, 1]") +__naked int bool_retval_test3(void *ctx) +{ + asm volatile ( + "r0 = -1;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/audit_rule_known") +__description("lsm bpf prog with bool retval. 
test 4") +__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]") +__naked int bool_retval_test4(void *ctx) +{ + asm volatile ( + "r0 = 2;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_free_security") +__success +__description("lsm bpf prog with void retval. test 1") +__naked int void_retval_test1(void *ctx) +{ + asm volatile ( + "r0 = -4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_free_security") +__success +__description("lsm bpf prog with void retval. test 2") +__naked int void_retval_test2(void *ctx) +{ + asm volatile ( + "r0 = 4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/getprocattr") +__description("lsm disabled hook: getprocattr") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test1(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/setprocattr") +__description("lsm disabled hook: setprocattr") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test2(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/ismaclabel") +__description("lsm disabled hook: ismaclabel") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test3(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c index 13b29a7faa71..2ecf77b623e0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c @@ -5,18 +5,27 @@ #include "bpf_misc.h" /* Check that precision marks propagate through scalar IDs. - * Registers r{0,1,2} have the same scalar ID at the moment when r0 is - * marked to be precise, this mark is immediately propagated to r{1,2}. + * Registers r{0,1,2} have the same scalar ID. + * Range information is propagated for scalars sharing same ID. + * Check that precision mark for r0 causes precision marks for r{1,2} + * when range information is propagated for 'if <reg> <op> <const>' insn. 
*/ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10") +/* first 'if' branch */ +__msg("6: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2 stack=:") __msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +/* second 'if' branch */ +__msg("from 4 to 5: ") +__msg("6: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10") +__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0") +/* parent state already has r{0,1,2} as precise */ +__msg("frame0: parent state regs= stack=:") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_same_state(void) +__naked void linked_regs_bpf_k(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -25,7 +34,8 @@ __naked void precision_same_state(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "if r1 > 7 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ "r3 = r10;" @@ -37,22 +47,17 @@ __naked void precision_same_state(void) : __clobber_all); } -/* Same as precision_same_state, but mark propagates through state / - * parent state boundary. +/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed, + * check that verifier marks r{1,2} as precise while backtracking + * 'if r1 > ...' with r0 already marked. */ SEC("socket") __success __log_level(2) -__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1") -__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10") -__msg("frame0: parent state regs=r0,r1,r2 stack=:") -__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") -__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: parent state regs=r0 stack=:") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_cross_state(void) +__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:") +__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7") +__naked void linked_regs_bpf_x_src(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -61,13 +66,13 @@ __naked void precision_cross_state(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force checkpoint */ - "goto +0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "r3 = 7;" + "if r1 > r3 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ - "r3 = r10;" - "r3 += r0;" + "r4 = r10;" + "r4 += r0;" "r0 = 0;" "exit;" : @@ -75,19 +80,17 @@ __naked void precision_cross_state(void) : __clobber_all); } -/* Same as precision_same_state, but break one of the - * links, note that r1 is absent from regs=... in __msg below. +/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed, + * check that verifier marks r{0,1,2} as precise while backtracking + * 'if r1 > r3' with r3 already marked. 
*/ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10") -__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0") -__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_same_state_broken_link(void) +__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:") +__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7") +__naked void linked_regs_bpf_x_dst(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -96,15 +99,13 @@ __naked void precision_same_state_broken_link(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* break link for r1, this is the only line that differs - * compared to the previous test - */ - "r1 = 0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "r3 = 7;" + "if r1 > r3 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ - "r3 = r10;" - "r3 += r0;" + "r4 = r10;" + "r4 += r3;" "r0 = 0;" "exit;" : @@ -112,22 +113,18 @@ __naked void precision_same_state_broken_link(void) : __clobber_all); } -/* Same as precision_same_state_broken_link, but with state / - * parent state boundary. +/* Same as linked_regs_bpf_k, but break one of the + * links, note that r1 is absent from regs=... in __msg below. */ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10") -__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0") -__msg("frame0: parent state regs=r0,r2 stack=:") -__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") -__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("7: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10") __msg("frame0: parent state regs=r0 stack=:") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r2 stack=:") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_cross_state_broken_link(void) +__naked void linked_regs_broken_link(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -136,18 +133,13 @@ __naked void precision_cross_state_broken_link(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force checkpoint, although link between r1 and r{0,2} is - * broken by the next statement current precision tracking - * algorithm can't react to it and propagates mark for r1 to - * the parent state. 
- */ - "goto +0;" /* break link for r1, this is the only line that differs - * compared to precision_cross_state() + * compared to the previous test */ "r1 = 0;" - /* force r0 to be precise, this immediately marks r1 and r2 as - * precise as well because of shared IDs + "if r0 > 7 goto +0;" + /* force r0 to be precise, + * this eventually marks r2 as precise because of shared IDs */ "r3 = r10;" "r3 += r0;" @@ -164,10 +156,16 @@ __naked void precision_cross_state_broken_link(void) */ SEC("socket") __success __log_level(2) -__msg("11: (0f) r2 += r1") +__msg("12: (0f) r2 += r1") /* Current state */ -__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1") -__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ") +__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10") +__msg("frame2: parent state regs=r1 stack=") +__msg("frame1: parent state regs= stack=") +__msg("frame0: parent state regs= stack=") +/* Parent state */ +__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ") +__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0") __msg("frame2: parent state regs=r1 stack=") /* frame1.r{6,7} are marked because mark_precise_scalar_ids() * looks for all registers with frame2.r1.id in the current state @@ -192,7 +190,7 @@ __msg("frame1: regs=r1 stack= before 4: (85) call pc+1") __msg("frame0: parent state regs=r1,r6 stack=") /* Parent state */ __msg("frame0: last_idx 3 first_idx 1 subseq_idx 4") -__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0") +__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0") __msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") __msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") __flag(BPF_F_TEST_STATE_FREQ) @@ -230,7 +228,8 @@ static __naked __noinline __used void precision_many_frames__bar(void) { asm volatile ( - /* force r1 to be precise, this immediately marks: + "if r1 > 7 goto +0;" + /* force r1 to be precise, this eventually marks: * - bar frame r1 * - foo frame r{1,6,7} * - main frame r{1,6} @@ -247,14 +246,16 @@ void precision_many_frames__bar(void) */ SEC("socket") __success __log_level(2) +__msg("11: (0f) r2 += r1") /* foo frame */ -__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10") +__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0") __msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1") __msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1") __msg("frame1: regs=r1 stack= before 4: (85) call pc+2") /* main frame */ -__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0") __msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") __flag(BPF_F_TEST_STATE_FREQ) __naked void precision_stack(void) @@ -283,7 +284,8 @@ void precision_stack__foo(void) */ "*(u64*)(r10 - 8) = r1;" "*(u64*)(r10 - 16) = r1;" - /* force r1 to be precise, this immediately marks: + "if r1 > 7 goto +0;" + /* force r1 to be precise, this eventually marks: * - foo frame r1,fp{-8,-16} * - main frame r1,fp{-8} */ @@ -299,15 +301,17 @@ void precision_stack__foo(void) SEC("socket") __success __log_level(2) /* r{6,7} */ -__msg("11: (0f) r3 += r7") -__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10") +__msg("12: (0f) r3 += r7") +__msg("frame0: regs=r7 stack= before 11: (bf) r3 
= r10") +__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0") /* ... skip some insns ... */ __msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0") __msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0") /* r{8,9} */ -__msg("12: (0f) r3 += r9") -__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7") +__msg("13: (0f) r3 += r9") +__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7") /* ... skip some insns ... */ +__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0") __msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0") __msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0") __flag(BPF_F_TEST_STATE_FREQ) @@ -328,8 +332,9 @@ __naked void precision_two_ids(void) "r9 = r0;" /* clear r0 id */ "r0 = 0;" - /* force checkpoint */ - "goto +0;" + /* propagate equal scalars precision */ + "if r7 > 7 goto +0;" + "if r9 > 7 goto +0;" "r3 = r10;" /* force r7 to be precise, this also marks r6 */ "r3 += r7;" @@ -341,6 +346,105 @@ __naked void precision_two_ids(void) : __clobber_all); } +SEC("socket") +__success __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +/* check that r0 and r6 have different IDs after 'if', + * collect_linked_regs() can't tie more than 6 registers for a single insn. + */ +__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1") +__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2") +/* check that r{0-5} are marked precise after 'if' */ +__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:") +__naked void linked_regs_too_many_regs(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r{0-6} IDs */ + "r1 = r0;" + "r2 = r0;" + "r3 = r0;" + "r4 = r0;" + "r5 = r0;" + "r6 = r0;" + /* propagate range for r{0-6} */ + "if r0 > 7 goto +0;" + /* make r6 appear in the log */ + "r6 = r6;" + /* force r0 to be precise, + * this would cause r{0-4} to be precise because of shared IDs + */ + "r7 = r10;" + "r7 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__failure __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +__msg("regs=r7 stack= before 5: (3d) if r8 >= r0") +__msg("parent state regs=r0,r7,r8") +__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1") +__msg("div by zero") +__naked void linked_regs_broken_link_2(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r8 = r0;" + "call %[bpf_get_prandom_u32];" + "if r0 > 1 goto +0;" + /* r7.id == r8.id, + * thus r7 precision implies r8 precision, + * which implies r0 precision because of the conditional below. + */ + "if r8 >= r0 goto 1f;" + /* break id relation between r7 and r8 */ + "r8 += r8;" + /* make r7 precise */ + "if r7 == 0 goto 1f;" + "r0 /= 0;" +"1:" + "r0 = 42;" + "exit;" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* Check that mark_chain_precision() for one of the conditional jump + * operands does not trigger equal scalars precision propagation.
+ */ +SEC("socket") +__success __log_level(2) +__msg("3: (25) if r1 > 0x100 goto pc+0") +__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0") +__naked void cjmp_no_linked_regs_trigger(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id */ + "r1 = r0;" + /* the jump below would be predicted, thus r1 would be marked precise, + * this should not imply precision mark for r0 + */ + "if r1 > 256 goto +0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + /* Verify that check_ids() is used by regsafe() for scalars. * * r9 = ... some pointer with range X ... diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c index 2a2271cf0294..148d2299e5b4 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c +++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> +#include <limits.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -770,6 +771,444 @@ __naked void smod64_zero_divisor(void) " ::: __clobber_all); } +SEC("socket") +__description("SDIV64, overflow r/r, LLONG_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r3 = -1") +__xlated("3: r4 = r2") +__xlated("4: r11 = r3") +__xlated("5: r11 += 1") +__xlated("6: if r11 > 0x1 goto pc+4") +__xlated("7: if r11 == 0x0 goto pc+1") +__xlated("8: r2 = 0") +__xlated("9: r2 = -r2") +__xlated("10: goto pc+1") +__xlated("11: r2 s/= r3") +__xlated("12: r0 = 0") +__xlated("13: if r2 != r4 goto pc+1") +__xlated("14: r0 = 1") +__xlated("15: exit") +__naked void sdiv64_overflow_rr(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = -1; \ + r4 = r2; \ + r2 s/= r3; \ + r0 = 0; \ + if r2 != r4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, r/r, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r3 = -1") +__xlated("2: r11 = r3") +__xlated("3: r11 += 1") +__xlated("4: if r11 > 0x1 goto pc+4") +__xlated("5: if r11 == 0x0 goto pc+1") +__xlated("6: r2 = 0") +__xlated("7: r2 = -r2") +__xlated("8: goto pc+1") +__xlated("9: r2 s/= r3") +__xlated("10: r0 = r2") +__xlated("11: exit") +__naked void sdiv64_rr_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r3 = -1; \ + r2 s/= r3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, overflow r/i, LLONG_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r4 = r2") +__xlated("3: r2 = -r2") +__xlated("4: r0 = 0") +__xlated("5: if r2 != r4 goto pc+1") +__xlated("6: r0 = 1") +__xlated("7: exit") +__naked void sdiv64_overflow_ri(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r4 = r2; \ + r2 s/= -1; \ + r0 = 0; \ + if r2 != r4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, r/i, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r4 = r2") +__xlated("2: r2 = -r2") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void sdiv64_ri_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r4 = r2; \ + r2 s/= -1; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, overflow r/r, INT_MIN/-1") +__success __retval(1) 
+__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+4") +__xlated("6: if w11 == 0x0 goto pc+1") +__xlated("7: w2 = 0") +__xlated("8: w2 = -w2") +__xlated("9: goto pc+1") +__xlated("10: w2 s/= w3") +__xlated("11: r0 = 0") +__xlated("12: if w2 != w4 goto pc+1") +__xlated("13: r0 = 1") +__xlated("14: exit") +__naked void sdiv32_overflow_rr(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w3 = -1; \ + w4 = w2; \ + w2 s/= w3; \ + r0 = 0; \ + if w2 != w4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, r/r, small_val/-1") +__success __retval(5) +__arch_x86_64 +__xlated("0: w2 = -5") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+4") +__xlated("6: if w11 == 0x0 goto pc+1") +__xlated("7: w2 = 0") +__xlated("8: w2 = -w2") +__xlated("9: goto pc+1") +__xlated("10: w2 s/= w3") +__xlated("11: w0 = w2") +__xlated("12: exit") +__naked void sdiv32_rr_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = -5; \ + w3 = -1; \ + w4 = w2; \ + w2 s/= w3; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, overflow r/i, INT_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w4 = w2") +__xlated("2: w2 = -w2") +__xlated("3: r0 = 0") +__xlated("4: if w2 != w4 goto pc+1") +__xlated("5: r0 = 1") +__xlated("6: exit") +__naked void sdiv32_overflow_ri(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w4 = w2; \ + w2 s/= -1; \ + r0 = 0; \ + if w2 != w4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, r/i, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: w2 = 5") +__xlated("1: w4 = w2") +__xlated("2: w2 = -w2") +__xlated("3: w0 = w2") +__xlated("4: exit") +__naked void sdiv32_ri_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = 5; \ + w4 = w2; \ + w2 s/= -1; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, overflow r/r, LLONG_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r3 = -1") +__xlated("3: r4 = r2") +__xlated("4: r11 = r3") +__xlated("5: r11 += 1") +__xlated("6: if r11 > 0x1 goto pc+3") +__xlated("7: if r11 == 0x1 goto pc+3") +__xlated("8: w2 = 0") +__xlated("9: goto pc+1") +__xlated("10: r2 s%= r3") +__xlated("11: r0 = r2") +__xlated("12: exit") +__naked void smod64_overflow_rr(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = -1; \ + r4 = r2; \ + r2 s%%= r3; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, r/r, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r3 = -1") +__xlated("2: r4 = r2") +__xlated("3: r11 = r3") +__xlated("4: r11 += 1") +__xlated("5: if r11 > 0x1 goto pc+3") +__xlated("6: if r11 == 0x1 goto pc+3") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: r2 s%= r3") +__xlated("10: r0 = r2") +__xlated("11: exit") +__naked void smod64_rr_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r3 = -1; \ + r4 = r2; \ + r2 s%%= r3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, overflow r/i, LLONG_MIN/-1") +__success 
__retval(0) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r4 = r2") +__xlated("3: w2 = 0") +__xlated("4: r0 = r2") +__xlated("5: exit") +__naked void smod64_overflow_ri(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r4 = r2; \ + r2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, r/i, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r4 = r2") +__xlated("2: w2 = 0") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void smod64_ri_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r4 = r2; \ + r2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, overflow r/r, INT_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+3") +__xlated("6: if w11 == 0x1 goto pc+4") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: w2 s%= w3") +__xlated("10: goto pc+1") +__xlated("11: w2 = w2") +__xlated("12: r0 = r2") +__xlated("13: exit") +__naked void smod32_overflow_rr(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w3 = -1; \ + w4 = w2; \ + w2 s%%= w3; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, r/r, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -5") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+3") +__xlated("6: if w11 == 0x1 goto pc+4") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: w2 s%= w3") +__xlated("10: goto pc+1") +__xlated("11: w2 = w2") +__xlated("12: r0 = r2") +__xlated("13: exit") +__naked void smod32_rr_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = -5; \ + w3 = -1; \ + w4 = w2; \ + w2 s%%= w3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, overflow r/i, INT_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w4 = w2") +__xlated("2: w2 = 0") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void smod32_overflow_ri(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w4 = w2; \ + w2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, r/i, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = 5") +__xlated("1: w4 = w2") +__xlated("2: w2 = 0") +__xlated("3: w0 = w2") +__xlated("4: exit") +__naked void smod32_ri_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = 5; \ + w4 = w2; \ + w2 s%%= -1; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + #else SEC("socket") diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 85e48069c9e6..671d9f415dbf 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -402,7 +402,7 @@ __naked void spill_32bit_of_64bit_fail(void) *(u32*)(r10 - 8) = r1; \ /* 32-bit fill r2 from stack. 
*/ \ r2 = *(u32*)(r10 - 8); \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on spill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -441,7 +441,7 @@ __naked void spill_16bit_of_32bit_fail(void) *(u16*)(r10 - 8) = r1; \ /* 16-bit fill r2 from stack. */ \ r2 = *(u16*)(r10 - 8); \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on spill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -833,7 +833,7 @@ __naked void spill_64bit_of_64bit_ok(void) *(u64*)(r10 - 8) = r0; \ /* 64-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u64*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -866,7 +866,7 @@ __naked void spill_32bit_of_32bit_ok(void) *(u32*)(r10 - 8) = r0; \ /* 32-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u32*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -899,7 +899,7 @@ __naked void spill_16bit_of_16bit_ok(void) *(u16*)(r10 - 8) = r0; \ /* 16-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u16*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -932,7 +932,7 @@ __naked void spill_8bit_of_8bit_ok(void) *(u8*)(r10 - 8) = r0; \ /* 8-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u8*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -1029,7 +1029,7 @@ __naked void fill_32bit_after_spill_64bit_preserve_id(void) "r1 = *(u32*)(r10 - 4);" #endif " \ - /* Compare r1 with another register to trigger find_equal_scalars. */\ + /* Compare r1 with another register to trigger sync_linked_regs. */\ r2 = 0; \ if r1 != r2 goto l0_%=; \ /* The result of this comparison is predefined. */\ @@ -1070,7 +1070,7 @@ __naked void fill_32bit_after_spill_64bit_clear_id(void) "r2 = *(u32*)(r10 - 4);" #endif " \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on fill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -1213,10 +1213,10 @@ __success __log_level(2) * - once for path entry - label 2; * - once for path entry - label 1 - label 2. 
*/ -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") +__msg("8: (79) r1 = *(u64 *)(r10 -8)") +__msg("9: (95) exit") +__msg("from 2 to 7") +__msg("8: safe") __msg("processed 11 insns") __flag(BPF_F_TEST_STATE_FREQ) __naked void old_stack_misc_vs_cur_ctx_ptr(void) diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index 6a6fad625f7e..9d415f7ce599 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -278,7 +278,7 @@ __msg("mark_precise: frame0: last_idx 14 first_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") -__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") +__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0") __msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop") /* State entering callback body popped from states stack */ __msg("from 9 to 17: frame1:") diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c new file mode 100644 index 000000000000..8d60c634a114 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int main(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table SEC(".maps") = { + .values = { + [0] = (void *) &main, + }, +}; + +__noinline __auxiliary +static __naked int sub(void) +{ + asm volatile ( + "r2 = %[jmp_table] ll;" + "r3 = 0;" + "call 12;" + "exit;" + : + : __imm_addr(jmp_table) + : __clobber_all); +} + +__success +__arch_x86_64 +/* program entry for main(), regular function prologue */ +__jited(" endbr64") +__jited(" nopl (%rax,%rax)") +__jited(" xorq %rax, %rax") +__jited(" pushq %rbp") +__jited(" movq %rsp, %rbp") +/* tail call prologue for program: + * - establish memory location for tail call counter at &rbp[-8]; + * - spill tail_call_cnt_ptr at &rbp[-16]; + * - expect tail call counter to be passed in rax; + * - for entry program rax is a raw counter, value < 33; + * - for tail called program rax is tail_call_cnt_ptr (value > 33). + */ +__jited(" endbr64") +__jited(" cmpq $0x21, %rax") +__jited(" ja L0") +__jited(" pushq %rax") +__jited(" movq %rsp, %rax") +__jited(" jmp L1") +__jited("L0: pushq %rax") /* rbp[-8] = rax */ +__jited("L1: pushq %rax") /* rbp[-16] = rax */ +/* on subprogram call restore rax to be tail_call_cnt_ptr from rbp[-16] + * (cause original rax might be clobbered by this point) + */ +__jited(" movq -0x10(%rbp), %rax") +__jited(" callq 0x{{.*}}") /* call to sub() */ +__jited(" xorl %eax, %eax") +__jited(" leave") +__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */ +__jited("...") +/* subprogram entry for sub(), regular function prologue */ +__jited(" endbr64") +__jited(" nopl (%rax,%rax)") +__jited(" nopl (%rax)") +__jited(" pushq %rbp") +__jited(" movq %rsp, %rbp") +/* tail call prologue for subprogram address of tail call counter + * stored at rbp[-16]. 
+ */ +__jited(" endbr64") +__jited(" pushq %rax") /* rbp[-8] = rax */ +__jited(" pushq %rax") /* rbp[-16] = rax */ +__jited(" movabsq ${{.*}}, %rsi") /* r2 = &jmp_table */ +__jited(" xorl %edx, %edx") /* r3 = 0 */ +/* bpf_tail_call implementation: + * - load tail_call_cnt_ptr from rbp[-16]; + * - if *tail_call_cnt_ptr < 33, increment it and jump to target; + * - otherwise do nothing. + */ +__jited(" movq -0x10(%rbp), %rax") +__jited(" cmpq $0x21, (%rax)") +__jited(" jae L0") +__jited(" nopl (%rax,%rax)") +__jited(" addq $0x1, (%rax)") /* *tail_call_cnt_ptr += 1 */ +__jited(" popq %rax") +__jited(" popq %rax") +__jited(" jmp {{.*}}") /* jump to tail call tgt */ +__jited("L0: leave") +__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */ +SEC("tc") +__naked int main(void) +{ + asm volatile ( + "call %[sub];" + "r0 = 0;" + "exit;" + : + : __imm(sub) + : __clobber_all); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c new file mode 100644 index 000000000000..a7c0a553aa50 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google LLC. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +static char buf[64]; + +SEC("lsm.s/file_open") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_sleepable) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm/file_open") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_non_sleepable, struct file *file) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/task_alloc") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_argument, + struct task_struct *task) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(task); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/inode_getattr") +__success +int BPF_PROG(path_d_path_from_path_argument, struct path *path) +{ + int ret; + + ret = bpf_path_d_path(path, buf, sizeof(buf)); + __sink(ret); + return 0; +} + +SEC("lsm.s/file_open") +__success +int BPF_PROG(path_d_path_from_file_argument, struct file *file) +{ + int ret; + struct path *path; + + /* The f_path member is a path which is embedded directly within a + * file. Therefore, a pointer to such embedded members are still + * recognized by the BPF verifier as being PTR_TRUSTED as it's + * essentially PTR_TRUSTED w/ a non-zero fixed offset. + */ + path = &file->f_path; + ret = bpf_path_d_path(path, buf, sizeof(buf)); + __sink(ret); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c new file mode 100644 index 000000000000..d6d3f4fcb24c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google LLC. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <linux/limits.h> + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +static char buf[PATH_MAX]; + +SEC("lsm.s/file_open") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(get_task_exe_file_kfunc_null) +{ + struct file *acquired; + + /* Can't pass a NULL pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(NULL); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/inode_getxattr") +__failure __msg("arg#0 pointer type STRUCT task_struct must point to scalar, or struct with scalar") +int BPF_PROG(get_task_exe_file_kfunc_fp) +{ + u64 x; + struct file *acquired; + struct task_struct *task; + + task = (struct task_struct *)&x; + /* Can't pass random frame pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(task); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(get_task_exe_file_kfunc_untrusted) +{ + struct file *acquired; + struct task_struct *parent; + + /* Walking a trusted struct task_struct returned from + * bpf_get_current_task_btf() yields an untrusted pointer. + */ + parent = bpf_get_current_task_btf()->parent; + /* Can't pass untrusted pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(parent); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("Unreleased reference") +int BPF_PROG(get_task_exe_file_kfunc_unreleased) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + /* Acquired but never released. */ + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("release kernel function bpf_put_file expects") +int BPF_PROG(put_file_kfunc_unacquired, struct file *file) +{ + /* Can't release an unacquired pointer. */ + bpf_put_file(file); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(path_d_path_kfunc_null) +{ + /* Can't pass NULL value to bpf_path_d_path() kfunc. */ + bpf_path_d_path(NULL, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/task_alloc") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task) +{ + struct path *root; + + /* Walking a trusted argument typically yields an untrusted + * pointer. This is one example of that. + */ + root = &task->fs->root; + bpf_path_d_path(root, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(path_d_path_kfunc_untrusted_from_current) +{ + struct path *pwd; + struct task_struct *current; + + current = bpf_get_current_task_btf(); + /* Walking a trusted pointer returned from bpf_get_current_task_btf() + * yields an untrusted pointer. 
+ */ + pwd = ¤t->fs->pwd; + bpf_path_d_path(pwd, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("kernel function bpf_path_d_path args#0 expected pointer to STRUCT path but R1 has a pointer to STRUCT file") +int BPF_PROG(path_d_path_kfunc_type_mismatch, struct file *file) +{ + bpf_path_d_path((struct path *)&file->f_task_work, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("invalid access to map value, value_size=4096 off=0 size=8192") +int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file) +{ + /* bpf_path_d_path() enforces a constraint on the buffer size supplied + * by the BPF LSM program via the __sz annotation. buf here is set to + * PATH_MAX, so let's ensure that the BPF verifier rejects BPF_PROG_LOAD + * attempts if the supplied size and the actual size of the buffer + * mismatches. + */ + bpf_path_d_path(&file->f_path, buf, PATH_MAX * 2); + return 0; +} + +SEC("fentry/vfs_open") +__failure __msg("calling kernel function bpf_path_d_path is not allowed") +int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f) +{ + /* Calling bpf_path_d_path() from a non-LSM BPF program isn't permitted. + */ + bpf_path_d_path(path, buf, sizeof(buf)); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c index d037262c8937..682dda8dabbc 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c @@ -10,19 +10,19 @@ struct { __uint(value_size, sizeof(int)); } tx_port SEC(".maps"); -SEC("redirect_map_0") +SEC("xdp") int xdp_redirect_map_0(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 0, 0); } -SEC("redirect_map_1") +SEC("xdp") int xdp_redirect_map_1(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 1, 0); } -SEC("redirect_map_2") +SEC("xdp") int xdp_redirect_map_2(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 2, 0); diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c deleted file mode 100644 index 0861ea60dcdd..000000000000 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ /dev/null @@ -1,174 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <assert.h> -#include <bpf/bpf.h> -#include <linux/filter.h> -#include <stdio.h> -#include <stdlib.h> -#include <sys/sysinfo.h> - -#include "bpf_util.h" -#include "cgroup_helpers.h" -#include "testing_helpers.h" - -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - -#define TEST_CGROUP "/test-bpf-cgroup-storage-buf/" - -int main(int argc, char **argv) -{ - struct bpf_insn prog[] = { - BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */ - BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - - BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ - BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }; - size_t insns_cnt = ARRAY_SIZE(prog); - int error = EXIT_FAILURE; - int map_fd, percpu_map_fd, 
prog_fd, cgroup_fd; - struct bpf_cgroup_storage_key key; - unsigned long long value; - unsigned long long *percpu_value; - int cpu, nproc; - - nproc = bpf_num_possible_cpus(); - percpu_value = malloc(sizeof(*percpu_value) * nproc); - if (!percpu_value) { - printf("Not enough memory for per-cpu area (%d cpus)\n", nproc); - goto err; - } - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key), - sizeof(value), 0, NULL); - if (map_fd < 0) { - printf("Failed to create map: %s\n", strerror(errno)); - goto out; - } - - percpu_map_fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, - sizeof(key), sizeof(value), 0, NULL); - if (percpu_map_fd < 0) { - printf("Failed to create map: %s\n", strerror(errno)); - goto out; - } - - prog[0].imm = percpu_map_fd; - prog[7].imm = map_fd; - prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, - prog, insns_cnt, "GPL", 0, - bpf_log_buf, BPF_LOG_BUF_SIZE); - if (prog_fd < 0) { - printf("Failed to load bpf program: %s\n", bpf_log_buf); - goto out; - } - - cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); - - /* Attach the bpf program */ - if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { - printf("Failed to attach bpf program\n"); - goto err; - } - - if (bpf_map_get_next_key(map_fd, NULL, &key)) { - printf("Failed to get the first key in cgroup storage\n"); - goto err; - } - - if (bpf_map_lookup_elem(map_fd, &key, &value)) { - printf("Failed to lookup cgroup storage 0\n"); - goto err; - } - - for (cpu = 0; cpu < nproc; cpu++) - percpu_value[cpu] = 1000; - - if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) { - printf("Failed to update the data in the cgroup storage\n"); - goto err; - } - - /* Every second packet should be dropped */ - assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); - assert(system("ping localhost -c 1 -W 1 -q > /dev/null")); - assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); - - /* Check the counter in the cgroup local storage */ - if (bpf_map_lookup_elem(map_fd, &key, &value)) { - printf("Failed to lookup cgroup storage\n"); - goto err; - } - - if (value != 3) { - printf("Unexpected data in the cgroup storage: %llu\n", value); - goto err; - } - - /* Bump the counter in the cgroup local storage */ - value++; - if (bpf_map_update_elem(map_fd, &key, &value, 0)) { - printf("Failed to update the data in the cgroup storage\n"); - goto err; - } - - /* Every second packet should be dropped */ - assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); - assert(system("ping localhost -c 1 -W 1 -q > /dev/null")); - assert(system("ping localhost -c 1 -W 1 -q > /dev/null") == 0); - - /* Check the final value of the counter in the cgroup local storage */ - if (bpf_map_lookup_elem(map_fd, &key, &value)) { - printf("Failed to lookup the cgroup storage\n"); - goto err; - } - - if (value != 7) { - printf("Unexpected data in the cgroup storage: %llu\n", value); - goto err; - } - - /* Check the final value of the counter in the percpu local storage */ - - for (cpu = 0; cpu < nproc; cpu++) - percpu_value[cpu] = 0; - - if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) { - printf("Failed to lookup the per-cpu cgroup storage\n"); - goto err; - } - - value = 0; - for (cpu = 0; cpu < nproc; cpu++) - value += percpu_value[cpu]; - - if (value != nproc * 1000 + 6) { - printf("Unexpected data in the per-cpu cgroup storage\n"); - goto err; - } - - error = 0; - 
printf("test_cgroup_storage:PASS\n"); - -err: - cleanup_cgroup_environment(); - free(percpu_value); - -out: - return error; -} diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp index dde0bb16e782..abc2a56ab261 100644 --- a/tools/testing/selftests/bpf/test_cpp.cpp +++ b/tools/testing/selftests/bpf/test_cpp.cpp @@ -6,6 +6,10 @@ #include <bpf/libbpf.h> #include <bpf/bpf.h> #include <bpf/btf.h> + +#ifndef _Bool +#define _Bool bool +#endif #include "test_core_extern.skel.h" #include "struct_ops_module.skel.h" diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c deleted file mode 100644 index adeaf63cb6fa..000000000000 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2017 Facebook - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <errno.h> -#include <assert.h> -#include <sys/time.h> - -#include <linux/bpf.h> -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -#include "cgroup_helpers.h" -#include "testing_helpers.h" - -#define DEV_CGROUP_PROG "./dev_cgroup.bpf.o" - -#define TEST_CGROUP "/test-bpf-based-device-cgroup/" - -int main(int argc, char **argv) -{ - struct bpf_object *obj; - int error = EXIT_FAILURE; - int prog_fd, cgroup_fd; - __u32 prog_cnt; - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, - &obj, &prog_fd)) { - printf("Failed to load DEV_CGROUP program\n"); - goto out; - } - - cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); - if (cgroup_fd < 0) { - printf("Failed to create test cgroup\n"); - goto out; - } - - /* Attach bpf program */ - if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, 0)) { - printf("Failed to attach DEV_CGROUP program"); - goto err; - } - - if (bpf_prog_query(cgroup_fd, BPF_CGROUP_DEVICE, 0, NULL, NULL, - &prog_cnt)) { - printf("Failed to query attached programs"); - goto err; - } - - /* All operations with /dev/zero and and /dev/urandom are allowed, - * everything else is forbidden. - */ - assert(system("rm -f /tmp/test_dev_cgroup_null") == 0); - assert(system("mknod /tmp/test_dev_cgroup_null c 1 3")); - assert(system("rm -f /tmp/test_dev_cgroup_null") == 0); - - /* /dev/zero is whitelisted */ - assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0); - assert(system("mknod /tmp/test_dev_cgroup_zero c 1 5") == 0); - assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0); - - assert(system("dd if=/dev/urandom of=/dev/zero count=64") == 0); - - /* src is allowed, target is forbidden */ - assert(system("dd if=/dev/urandom of=/dev/full count=64")); - - /* src is forbidden, target is allowed */ - assert(system("dd if=/dev/random of=/dev/zero count=64")); - - error = 0; - printf("test_dev_cgroup:PASS\n"); - -err: - cleanup_cgroup_environment(); - -out: - return error; -} diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index f14e10b0de96..3e9b009580d4 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -7,8 +7,10 @@ #include <bpf/btf.h> #include "autoconf_helper.h" +#include "disasm_helpers.h" #include "unpriv_helpers.h" #include "cap_helpers.h" +#include "jit_disasm_helpers.h" #define str_has_pfx(str, pfx) \ (strncmp(str, pfx, __builtin_constant_p(pfx) ? 
sizeof(pfx) - 1 : strlen(pfx)) == 0) @@ -18,11 +20,11 @@ #define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure" #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" -#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex=" +#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated=" #define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv" #define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv" #define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv=" -#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv=" +#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" @@ -31,6 +33,9 @@ #define TEST_TAG_AUXILIARY "comment:test_auxiliary" #define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv" #define TEST_BTF_PATH "comment:test_btf_path=" +#define TEST_TAG_ARCH "comment:test_arch=" +#define TEST_TAG_JITED_PFX "comment:test_jited=" +#define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv=" /* Warning: duplicated in bpf_misc.h */ #define POINTER_VALUE 0xcafe4all @@ -51,15 +56,22 @@ enum mode { struct expect_msg { const char *substr; /* substring match */ - const char *regex_str; /* regex-based match */ regex_t regex; + bool is_regex; + bool on_next_line; +}; + +struct expected_msgs { + struct expect_msg *patterns; + size_t cnt; }; struct test_subspec { char *name; bool expect_failure; - struct expect_msg *expect_msgs; - size_t expect_msg_cnt; + struct expected_msgs expect_msgs; + struct expected_msgs expect_xlated; + struct expected_msgs jited; int retval; bool execute; }; @@ -72,6 +84,7 @@ struct test_spec { int log_level; int prog_flags; int mode_mask; + int arch_mask; bool auxiliary; bool valid; }; @@ -96,61 +109,152 @@ void test_loader_fini(struct test_loader *tester) free(tester->log_buf); } -static void free_test_spec(struct test_spec *spec) +static void free_msgs(struct expected_msgs *msgs) { int i; + for (i = 0; i < msgs->cnt; i++) + if (msgs->patterns[i].is_regex) + regfree(&msgs->patterns[i].regex); + free(msgs->patterns); + msgs->patterns = NULL; + msgs->cnt = 0; +} + +static void free_test_spec(struct test_spec *spec) +{ /* Deallocate expect_msgs arrays. */ - for (i = 0; i < spec->priv.expect_msg_cnt; i++) - if (spec->priv.expect_msgs[i].regex_str) - regfree(&spec->priv.expect_msgs[i].regex); - for (i = 0; i < spec->unpriv.expect_msg_cnt; i++) - if (spec->unpriv.expect_msgs[i].regex_str) - regfree(&spec->unpriv.expect_msgs[i].regex); + free_msgs(&spec->priv.expect_msgs); + free_msgs(&spec->unpriv.expect_msgs); + free_msgs(&spec->priv.expect_xlated); + free_msgs(&spec->unpriv.expect_xlated); + free_msgs(&spec->priv.jited); + free_msgs(&spec->unpriv.jited); free(spec->priv.name); free(spec->unpriv.name); - free(spec->priv.expect_msgs); - free(spec->unpriv.expect_msgs); - spec->priv.name = NULL; spec->unpriv.name = NULL; - spec->priv.expect_msgs = NULL; - spec->unpriv.expect_msgs = NULL; } -static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec) +/* Compiles regular expression matching pattern. 
+ * Pattern has a special syntax: + * + * pattern := (<verbatim text> | regex)* + * regex := "{{" <posix extended regular expression> "}}" + * + * In other words, pattern is a verbatim text with inclusion + * of regular expressions enclosed in "{{" "}}" pairs. + * For example, pattern "foo{{[0-9]+}}" matches strings like + * "foo0", "foo007", etc. + */ +static int compile_regex(const char *pattern, regex_t *regex) +{ + char err_buf[256], buf[256] = {}, *ptr, *buf_end; + const char *original_pattern = pattern; + bool in_regex = false; + int err; + + buf_end = buf + sizeof(buf); + ptr = buf; + while (*pattern && ptr < buf_end - 2) { + if (!in_regex && str_has_pfx(pattern, "{{")) { + in_regex = true; + pattern += 2; + continue; + } + if (in_regex && str_has_pfx(pattern, "}}")) { + in_regex = false; + pattern += 2; + continue; + } + if (in_regex) { + *ptr++ = *pattern++; + continue; + } + /* list of characters that need escaping for extended posix regex */ + if (strchr(".[]\\()*+?{}|^$", *pattern)) { + *ptr++ = '\\'; + *ptr++ = *pattern++; + continue; + } + *ptr++ = *pattern++; + } + if (*pattern) { + PRINT_FAIL("Regexp too long: '%s'\n", original_pattern); + return -EINVAL; + } + if (in_regex) { + PRINT_FAIL("Regexp has open '{{' but no closing '}}': '%s'\n", original_pattern); + return -EINVAL; + } + err = regcomp(regex, buf, REG_EXTENDED | REG_NEWLINE); + if (err != 0) { + regerror(err, regex, err_buf, sizeof(err_buf)); + PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", buf, err_buf); + return -EINVAL; + } + return 0; +} + +static int __push_msg(const char *pattern, bool on_next_line, struct expected_msgs *msgs) { - void *tmp; - int regcomp_res; - char error_msg[100]; struct expect_msg *msg; + void *tmp; + int err; - tmp = realloc(subspec->expect_msgs, - (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg)); + tmp = realloc(msgs->patterns, + (1 + msgs->cnt) * sizeof(struct expect_msg)); if (!tmp) { ASSERT_FAIL("failed to realloc memory for messages\n"); return -ENOMEM; } - subspec->expect_msgs = tmp; - msg = &subspec->expect_msgs[subspec->expect_msg_cnt]; + msgs->patterns = tmp; + msg = &msgs->patterns[msgs->cnt]; + msg->on_next_line = on_next_line; + msg->substr = pattern; + msg->is_regex = false; + if (strstr(pattern, "{{")) { + err = compile_regex(pattern, &msg->regex); + if (err) + return err; + msg->is_regex = true; + } + msgs->cnt += 1; + return 0; +} - if (substr) { - msg->substr = substr; - msg->regex_str = NULL; - } else { - msg->regex_str = regex_str; - msg->substr = NULL; - regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE); - if (regcomp_res != 0) { - regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg)); - PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", - regex_str, error_msg); - return -EINVAL; - } +static int clone_msgs(struct expected_msgs *from, struct expected_msgs *to) +{ + struct expect_msg *msg; + int i, err; + + for (i = 0; i < from->cnt; i++) { + msg = &from->patterns[i]; + err = __push_msg(msg->substr, msg->on_next_line, to); + if (err) + return err; } + return 0; +} + +static int push_msg(const char *substr, struct expected_msgs *msgs) +{ + return __push_msg(substr, false, msgs); +} - subspec->expect_msg_cnt += 1; +static int push_disasm_msg(const char *regex_str, bool *on_next_line, struct expected_msgs *msgs) +{ + int err; + + if (strcmp(regex_str, "...") == 0) { + *on_next_line = false; + return 0; + } + err = __push_msg(regex_str, *on_next_line, msgs); + if (err) + return err; + *on_next_line = true; return 0; } 
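For illustration only (a hypothetical test, not part of this patch), the helpers above back the __arch_*/__xlated/__jited annotations used throughout this series: plain text in a pattern is matched as a substring (compile_regex() escapes regex metacharacters outside of "{{ }}"), a "{{...}}" group embeds a POSIX extended regex, and a bare "..." pattern relaxes the consecutive-line requirement tracked by push_disasm_msg(). A selftest combining them could look roughly like this sketch:

SEC("socket")
__success
__arch_x86_64
__xlated("0: r0 = 42")			/* matched as a plain substring */
__xlated("...")				/* skip any number of xlated lines */
__xlated("{{[0-9]+}}: exit")		/* "{{ }}" embeds a POSIX extended regex */
__jited("...")
__jited("	{{(retq|jmp	0x)}}")	/* JITed code ends with a return or rethunk jump */
__naked void xlated_jited_match_sketch(void)
{
	asm volatile (
	"r0 = 42;"
	"exit;"
	::: __clobber_all);
}

The expected lines above are made up for illustration; the real tests added in this series (e.g. verifier_sdiv.c and verifier_tailcall_jit.c earlier in this diff) pin down the actual verifier rewrites and JIT output they care about.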
@@ -202,6 +306,54 @@ static void update_flags(int *flags, int flag, bool clear) *flags |= flag; } +/* Matches a string of form '<pfx>[^=]=.*' and returns it's suffix. + * Used to parse btf_decl_tag values. + * Such values require unique prefix because compiler does not add + * same __attribute__((btf_decl_tag(...))) twice. + * Test suite uses two-component tags for such cases: + * + * <pfx> __COUNTER__ '=' + * + * For example, two consecutive __msg tags '__msg("foo") __msg("foo")' + * would be encoded as: + * + * [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1 + * [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1 + * + * And the purpose of this function is to extract 'foo' from the above. + */ +static const char *skip_dynamic_pfx(const char *s, const char *pfx) +{ + const char *msg; + + if (strncmp(s, pfx, strlen(pfx)) != 0) + return NULL; + msg = s + strlen(pfx); + msg = strchr(msg, '='); + if (!msg) + return NULL; + return msg + 1; +} + +enum arch { + ARCH_UNKNOWN = 0x1, + ARCH_X86_64 = 0x2, + ARCH_ARM64 = 0x4, + ARCH_RISCV64 = 0x8, +}; + +static int get_current_arch(void) +{ +#if defined(__x86_64__) + return ARCH_X86_64; +#elif defined(__aarch64__) + return ARCH_ARM64; +#elif defined(__riscv) && __riscv_xlen == 64 + return ARCH_RISCV64; +#endif + return ARCH_UNKNOWN; +} + /* Uses btf_decl_tag attributes to describe the expected test * behavior, see bpf_misc.h for detailed description of each attribute * and attribute combinations. @@ -214,8 +366,15 @@ static int parse_test_spec(struct test_loader *tester, const char *description = NULL; bool has_unpriv_result = false; bool has_unpriv_retval = false; + bool unpriv_xlated_on_next_line = true; + bool xlated_on_next_line = true; + bool unpriv_jit_on_next_line; + bool jit_on_next_line; + bool collect_jit = false; int func_id, i, err = 0; + u32 arch_mask = 0; struct btf *btf; + enum arch arch; memset(spec, 0, sizeof(*spec)); @@ -270,27 +429,49 @@ static int parse_test_spec(struct test_loader *tester, } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) { spec->auxiliary = true; spec->mode_mask |= UNPRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { - msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; - err = push_msg(msg, NULL, &spec->priv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) { + err = push_msg(msg, &spec->priv.expect_msgs); if (err) goto cleanup; spec->mode_mask |= PRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { - msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; - err = push_msg(msg, NULL, &spec->unpriv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) { + err = push_msg(msg, &spec->unpriv.expect_msgs); if (err) goto cleanup; spec->mode_mask |= UNPRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) { - msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1; - err = push_msg(NULL, msg, &spec->priv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX))) { + if (arch_mask == 0) { + PRINT_FAIL("__jited used before __arch_*"); + goto cleanup; + } + if (collect_jit) { + err = push_disasm_msg(msg, &jit_on_next_line, + &spec->priv.jited); + if (err) + goto cleanup; + spec->mode_mask |= PRIV; + } + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_JITED_PFX_UNPRIV))) { + if (arch_mask == 0) { + PRINT_FAIL("__unpriv_jited used before __arch_*"); + goto cleanup; + } + if (collect_jit) { + err = push_disasm_msg(msg, &unpriv_jit_on_next_line, + &spec->unpriv.jited); + if (err) + goto cleanup; + 
spec->mode_mask |= UNPRIV; + } + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) { + err = push_disasm_msg(msg, &xlated_on_next_line, + &spec->priv.expect_xlated); if (err) goto cleanup; spec->mode_mask |= PRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) { - msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1; - err = push_msg(NULL, msg, &spec->unpriv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) { + err = push_disasm_msg(msg, &unpriv_xlated_on_next_line, + &spec->unpriv.expect_xlated); if (err) goto cleanup; spec->mode_mask |= UNPRIV; @@ -341,11 +522,30 @@ static int parse_test_spec(struct test_loader *tester, goto cleanup; update_flags(&spec->prog_flags, flags, clear); } + } else if (str_has_pfx(s, TEST_TAG_ARCH)) { + val = s + sizeof(TEST_TAG_ARCH) - 1; + if (strcmp(val, "X86_64") == 0) { + arch = ARCH_X86_64; + } else if (strcmp(val, "ARM64") == 0) { + arch = ARCH_ARM64; + } else if (strcmp(val, "RISCV64") == 0) { + arch = ARCH_RISCV64; + } else { + PRINT_FAIL("bad arch spec: '%s'", val); + err = -EINVAL; + goto cleanup; + } + arch_mask |= arch; + collect_jit = get_current_arch() == arch; + unpriv_jit_on_next_line = true; + jit_on_next_line = true; } else if (str_has_pfx(s, TEST_BTF_PATH)) { spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1; } } + spec->arch_mask = arch_mask ?: -1; + if (spec->mode_mask == 0) spec->mode_mask = PRIV; @@ -387,15 +587,12 @@ static int parse_test_spec(struct test_loader *tester, spec->unpriv.execute = spec->priv.execute; } - if (!spec->unpriv.expect_msgs) { - for (i = 0; i < spec->priv.expect_msg_cnt; i++) { - struct expect_msg *msg = &spec->priv.expect_msgs[i]; - - err = push_msg(msg->substr, msg->regex_str, &spec->unpriv); - if (err) - goto cleanup; - } - } + if (spec->unpriv.expect_msgs.cnt == 0) + clone_msgs(&spec->priv.expect_msgs, &spec->unpriv.expect_msgs); + if (spec->unpriv.expect_xlated.cnt == 0) + clone_msgs(&spec->priv.expect_xlated, &spec->unpriv.expect_xlated); + if (spec->unpriv.jited.cnt == 0) + clone_msgs(&spec->priv.jited, &spec->unpriv.jited); } spec->valid = true; @@ -434,7 +631,6 @@ static void prepare_case(struct test_loader *tester, bpf_program__set_flags(prog, prog_flags | spec->prog_flags); tester->log_buf[0] = '\0'; - tester->next_match_pos = 0; } static void emit_verifier_log(const char *log_buf, bool force) @@ -444,46 +640,84 @@ static void emit_verifier_log(const char *log_buf, bool force) fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf); } -static void validate_case(struct test_loader *tester, - struct test_subspec *subspec, - struct bpf_object *obj, - struct bpf_program *prog, - int load_err) +static void emit_xlated(const char *xlated, bool force) { - int i, j, err; - char *match; + if (!force && env.verbosity == VERBOSE_NONE) + return; + fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated); +} + +static void emit_jited(const char *jited, bool force) +{ + if (!force && env.verbosity == VERBOSE_NONE) + return; + fprintf(stdout, "JITED:\n=============\n%s=============\n", jited); +} + +static void validate_msgs(char *log_buf, struct expected_msgs *msgs, + void (*emit_fn)(const char *buf, bool force)) +{ + const char *log = log_buf, *prev_match; regmatch_t reg_match[1]; + int prev_match_line; + int match_line; + int i, j, err; - for (i = 0; i < subspec->expect_msg_cnt; i++) { - struct expect_msg *msg = &subspec->expect_msgs[i]; + prev_match_line = -1; + match_line = 0; + prev_match = log; + for (i = 0; i < 
msgs->cnt; i++) { + struct expect_msg *msg = &msgs->patterns[i]; + const char *match = NULL, *pat_status; + bool wrong_line = false; - if (msg->substr) { - match = strstr(tester->log_buf + tester->next_match_pos, msg->substr); + if (!msg->is_regex) { + match = strstr(log, msg->substr); if (match) - tester->next_match_pos = match - tester->log_buf + strlen(msg->substr); + log = match + strlen(msg->substr); } else { - err = regexec(&msg->regex, - tester->log_buf + tester->next_match_pos, 1, reg_match, 0); + err = regexec(&msg->regex, log, 1, reg_match, 0); if (err == 0) { - match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so; - tester->next_match_pos += reg_match[0].rm_eo; - } else { - match = NULL; + match = log + reg_match[0].rm_so; + log += reg_match[0].rm_eo; } } - if (!ASSERT_OK_PTR(match, "expect_msg")) { + if (match) { + for (; prev_match < match; ++prev_match) + if (*prev_match == '\n') + ++match_line; + wrong_line = msg->on_next_line && prev_match_line >= 0 && + prev_match_line + 1 != match_line; + } + + if (!match || wrong_line) { + PRINT_FAIL("expect_msg\n"); if (env.verbosity == VERBOSE_NONE) - emit_verifier_log(tester->log_buf, true /*force*/); + emit_fn(log_buf, true /*force*/); for (j = 0; j <= i; j++) { - msg = &subspec->expect_msgs[j]; + msg = &msgs->patterns[j]; + if (j < i) + pat_status = "MATCHED "; + else if (wrong_line) + pat_status = "WRONG LINE"; + else + pat_status = "EXPECTED "; + msg = &msgs->patterns[j]; fprintf(stderr, "%s %s: '%s'\n", - j < i ? "MATCHED " : "EXPECTED", - msg->substr ? "SUBSTR" : " REGEX", - msg->substr ?: msg->regex_str); + pat_status, + msg->is_regex ? " REGEX" : "SUBSTR", + msg->substr); } - return; + if (wrong_line) { + fprintf(stderr, + "expecting match at line %d, actual match is at line %d\n", + prev_match_line + 1, match_line); + } + break; } + + prev_match_line = match_line; } } @@ -611,6 +845,37 @@ static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subs return true; } +/* Get a disassembly of BPF program after verifier applies all rewrites */ +static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz) +{ + struct bpf_insn *insn_start = NULL, *insn, *insn_end; + __u32 insns_cnt = 0, i; + char buf[64]; + FILE *out = NULL; + int err; + + err = get_xlated_program(prog_fd, &insn_start, &insns_cnt); + if (!ASSERT_OK(err, "get_xlated_program")) + goto out; + out = fmemopen(text, text_sz, "w"); + if (!ASSERT_OK_PTR(out, "open_memstream")) + goto out; + insn_end = insn_start + insns_cnt; + insn = insn_start; + while (insn < insn_end) { + i = insn - insn_start; + insn = disasm_insn(insn, buf, sizeof(buf)); + fprintf(out, "%d: %s\n", i, buf); + } + fflush(out); + +out: + free(insn_start); + if (out) + fclose(out); + return err; +} + /* this function is forced noinline and has short generic name to look better * in test_progs output (in case of a failure) */ @@ -625,16 +890,23 @@ void run_subtest(struct test_loader *tester, { struct test_subspec *subspec = unpriv ? 
&spec->unpriv : &spec->priv; struct bpf_program *tprog = NULL, *tprog_iter; + struct bpf_link *link, *links[32] = {}; struct test_spec *spec_iter; struct cap_state caps = {}; struct bpf_object *tobj; struct bpf_map *map; int retval, err, i; + int links_cnt = 0; bool should_load; if (!test__start_subtest(subspec->name)) return; + if ((get_current_arch() & spec->arch_mask) == 0) { + test__skip(); + return; + } + if (unpriv) { if (!can_execute_unpriv(tester, spec)) { test__skip(); @@ -695,9 +967,32 @@ void run_subtest(struct test_loader *tester, goto tobj_cleanup; } } - emit_verifier_log(tester->log_buf, false /*force*/); - validate_case(tester, subspec, tobj, tprog, err); + validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log); + + if (subspec->expect_xlated.cnt) { + err = get_xlated_program_text(bpf_program__fd(tprog), + tester->log_buf, tester->log_buf_sz); + if (err) + goto tobj_cleanup; + emit_xlated(tester->log_buf, false /*force*/); + validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated); + } + + if (subspec->jited.cnt) { + err = get_jited_program_text(bpf_program__fd(tprog), + tester->log_buf, tester->log_buf_sz); + if (err == -EOPNOTSUPP) { + printf("%s:SKIP: jited programs disassembly is not supported,\n", __func__); + printf("%s:SKIP: tests are built w/o LLVM development libs\n", __func__); + test__skip(); + goto tobj_cleanup; + } + if (!ASSERT_EQ(err, 0, "get_jited_program_text")) + goto tobj_cleanup; + emit_jited(tester->log_buf, false /*force*/); + validate_msgs(tester->log_buf, &subspec->jited, emit_jited); + } if (should_do_test_run(spec, subspec)) { /* For some reason test_verifier executes programs @@ -706,6 +1001,26 @@ void run_subtest(struct test_loader *tester, if (restore_capabilities(&caps)) goto tobj_cleanup; + /* Do bpf_map__attach_struct_ops() for each struct_ops map. + * This should trigger bpf_struct_ops->reg callback on kernel side. 
+ */ + bpf_object__for_each_map(map, tobj) { + if (!bpf_map__autocreate(map) || + bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) + continue; + if (links_cnt >= ARRAY_SIZE(links)) { + PRINT_FAIL("too many struct_ops maps"); + goto tobj_cleanup; + } + link = bpf_map__attach_struct_ops(map); + if (!link) { + PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n", + bpf_map__name(map), err); + goto tobj_cleanup; + } + links[links_cnt++] = link; + } + if (tester->pre_execution_cb) { err = tester->pre_execution_cb(tobj); if (err) { @@ -720,9 +1035,14 @@ void run_subtest(struct test_loader *tester, PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); goto tobj_cleanup; } + /* redo bpf_map__attach_struct_ops for each test */ + while (links_cnt > 0) + bpf_link__destroy(links[--links_cnt]); } tobj_cleanup: + while (links_cnt > 0) + bpf_link__destroy(links[--links_cnt]); bpf_object__close(tobj); subtest_cleanup: test__end_subtest(); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 4d0650cfb5cd..fda7589c5023 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try) while (next < nr_cpus) { CPU_ZERO(&cpuset); - CPU_SET(next++, &cpuset); + CPU_SET(next, &cpuset); + next++; if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) { ret = 0; break; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index dfbab214f4d1..905d5981ace1 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1515,7 +1515,7 @@ again: value == key); } - /* Now let's delete all elemenets in parallel. */ + /* Now let's delete all elements in parallel. 
*/ data[1] = DO_DELETE; run_parallel(TASKS, test_update_delete, data); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 89ff704e9dad..c7a70e1a1085 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -10,7 +10,6 @@ #include <sched.h> #include <signal.h> #include <string.h> -#include <execinfo.h> /* backtrace */ #include <sys/sysinfo.h> /* get_nprocs */ #include <netinet/in.h> #include <sys/select.h> @@ -19,6 +18,25 @@ #include <bpf/btf.h> #include "json_writer.h" +#include "network_helpers.h" + +#ifdef __GLIBC__ +#include <execinfo.h> /* backtrace */ +#endif + +/* Default backtrace funcs if missing at link */ +__weak int backtrace(void **buffer, int size) +{ + return 0; +} + +__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd) +{ + dprintf(fd, "<backtrace not supported>\n"); +} + +int env_verbosity = 0; + static bool verbose(void) { return env.verbosity > VERBOSE_NONE; @@ -37,15 +55,15 @@ static void stdio_hijack_init(char **log_buf, size_t *log_cnt) stdout = open_memstream(log_buf, log_cnt); if (!stdout) { - stdout = env.stdout; + stdout = env.stdout_saved; perror("open_memstream"); return; } if (env.subtest_state) - env.subtest_state->stdout = stdout; + env.subtest_state->stdout_saved = stdout; else - env.test_state->stdout = stdout; + env.test_state->stdout_saved = stdout; stderr = stdout; #endif @@ -59,8 +77,8 @@ static void stdio_hijack(char **log_buf, size_t *log_cnt) return; } - env.stdout = stdout; - env.stderr = stderr; + env.stdout_saved = stdout; + env.stderr_saved = stderr; stdio_hijack_init(log_buf, log_cnt); #endif @@ -77,13 +95,13 @@ static void stdio_restore_cleanup(void) fflush(stdout); if (env.subtest_state) { - fclose(env.subtest_state->stdout); - env.subtest_state->stdout = NULL; - stdout = env.test_state->stdout; - stderr = env.test_state->stdout; + fclose(env.subtest_state->stdout_saved); + env.subtest_state->stdout_saved = NULL; + stdout = env.test_state->stdout_saved; + stderr = env.test_state->stdout_saved; } else { - fclose(env.test_state->stdout); - env.test_state->stdout = NULL; + fclose(env.test_state->stdout_saved); + env.test_state->stdout_saved = NULL; } #endif } @@ -96,13 +114,13 @@ static void stdio_restore(void) return; } - if (stdout == env.stdout) + if (stdout == env.stdout_saved) return; stdio_restore_cleanup(); - stdout = env.stdout; - stderr = env.stderr; + stdout = env.stdout_saved; + stderr = env.stderr_saved; #endif } @@ -141,6 +159,7 @@ struct prog_test_def { void (*run_serial_test)(void); bool should_run; bool need_cgroup_cleanup; + bool should_tmon; }; /* Override C runtime library's usleep() implementation to ensure nanosleep() @@ -178,46 +197,59 @@ static bool should_run(struct test_selector *sel, int num, const char *name) return num < sel->num_set_len && sel->num_set[num]; } -static bool should_run_subtest(struct test_selector *sel, - struct test_selector *subtest_sel, - int subtest_num, - const char *test_name, - const char *subtest_name) +static bool match_subtest(struct test_filter_set *filter, + const char *test_name, + const char *subtest_name) { int i, j; - for (i = 0; i < sel->blacklist.cnt; i++) { - if (glob_match(test_name, sel->blacklist.tests[i].name)) { - if (!sel->blacklist.tests[i].subtest_cnt) - return false; - - for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) { - if (glob_match(subtest_name, - sel->blacklist.tests[i].subtests[j])) - return false; - } - } - } - - for (i = 0; i < 
sel->whitelist.cnt; i++) { - if (glob_match(test_name, sel->whitelist.tests[i].name)) { - if (!sel->whitelist.tests[i].subtest_cnt) + for (i = 0; i < filter->cnt; i++) { + if (glob_match(test_name, filter->tests[i].name)) { + if (!filter->tests[i].subtest_cnt) return true; - for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) { + for (j = 0; j < filter->tests[i].subtest_cnt; j++) { if (glob_match(subtest_name, - sel->whitelist.tests[i].subtests[j])) + filter->tests[i].subtests[j])) return true; } } } + return false; +} + +static bool should_run_subtest(struct test_selector *sel, + struct test_selector *subtest_sel, + int subtest_num, + const char *test_name, + const char *subtest_name) +{ + if (match_subtest(&sel->blacklist, test_name, subtest_name)) + return false; + + if (match_subtest(&sel->whitelist, test_name, subtest_name)) + return true; + if (!sel->whitelist.cnt && !subtest_sel->num_set) return true; return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num]; } +static bool should_tmon(struct test_selector *sel, const char *name) +{ + int i; + + for (i = 0; i < sel->whitelist.cnt; i++) { + if (glob_match(name, sel->whitelist.tests[i].name) && + !sel->whitelist.tests[i].subtest_cnt) + return true; + } + + return false; +} + static char *test_result(bool failed, bool skipped) { return failed ? "FAIL" : (skipped ? "SKIP" : "OK"); @@ -230,25 +262,25 @@ static void print_test_result(const struct prog_test_def *test, const struct tes int skipped_cnt = test_state->skip_cnt; int subtests_cnt = test_state->subtest_num; - fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name); + fprintf(env.stdout_saved, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name); if (test_state->error_cnt) - fprintf(env.stdout, "FAIL"); + fprintf(env.stdout_saved, "FAIL"); else if (!skipped_cnt) - fprintf(env.stdout, "OK"); + fprintf(env.stdout_saved, "OK"); else if (skipped_cnt == subtests_cnt || !subtests_cnt) - fprintf(env.stdout, "SKIP"); + fprintf(env.stdout_saved, "SKIP"); else - fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt); + fprintf(env.stdout_saved, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt); - fprintf(env.stdout, "\n"); + fprintf(env.stdout_saved, "\n"); } static void print_test_log(char *log_buf, size_t log_cnt) { log_buf[log_cnt] = '\0'; - fprintf(env.stdout, "%s", log_buf); + fprintf(env.stdout_saved, "%s", log_buf); if (log_buf[log_cnt - 1] != '\n') - fprintf(env.stdout, "\n"); + fprintf(env.stdout_saved, "\n"); } static void print_subtest_name(int test_num, int subtest_num, @@ -259,14 +291,14 @@ static void print_subtest_name(int test_num, int subtest_num, snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num); - fprintf(env.stdout, "#%-*s %s/%s", + fprintf(env.stdout_saved, "#%-*s %s/%s", TEST_NUM_WIDTH, test_num_str, test_name, subtest_name); if (result) - fprintf(env.stdout, ":%s", result); + fprintf(env.stdout_saved, ":%s", result); - fprintf(env.stdout, "\n"); + fprintf(env.stdout_saved, "\n"); } static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt) @@ -451,7 +483,7 @@ bool test__start_subtest(const char *subtest_name) memset(subtest_state, 0, sub_state_size); if (!subtest_name || !subtest_name[0]) { - fprintf(env.stderr, + fprintf(env.stderr_saved, "Subtest #%d didn't provide sub-test name!\n", state->subtest_num); return false; @@ -459,7 +491,7 @@ bool test__start_subtest(const char *subtest_name) subtest_state->name = strdup(subtest_name); if 
(!subtest_state->name) { - fprintf(env.stderr, + fprintf(env.stderr_saved, "Subtest #%d: failed to copy subtest name!\n", state->subtest_num); return false; @@ -474,6 +506,10 @@ bool test__start_subtest(const char *subtest_name) return false; } + subtest_state->should_tmon = match_subtest(&env.tmon_selector.whitelist, + test->test_name, + subtest_name); + env.subtest_state = subtest_state; stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt); @@ -610,6 +646,92 @@ out: return err; } +struct netns_obj { + char *nsname; + struct tmonitor_ctx *tmon; + struct nstoken *nstoken; +}; + +/* Create a new network namespace with the given name. + * + * Create a new network namespace and set the network namespace of the + * current process to the new network namespace if the argument "open" is + * true. This function should be paired with netns_free() to release the + * resource and delete the network namespace. + * + * It also implements the functionality of the option "-m" by starting + * traffic monitor on the background to capture the packets in this network + * namespace if the current test or subtest matching the pattern. + * + * nsname: the name of the network namespace to create. + * open: open the network namespace if true. + * + * Return: the network namespace object on success, NULL on failure. + */ +struct netns_obj *netns_new(const char *nsname, bool open) +{ + struct netns_obj *netns_obj = malloc(sizeof(*netns_obj)); + const char *test_name, *subtest_name; + int r; + + if (!netns_obj) + return NULL; + memset(netns_obj, 0, sizeof(*netns_obj)); + + netns_obj->nsname = strdup(nsname); + if (!netns_obj->nsname) + goto fail; + + /* Create the network namespace */ + r = make_netns(nsname); + if (r) + goto fail; + + /* Start traffic monitor */ + if (env.test->should_tmon || + (env.subtest_state && env.subtest_state->should_tmon)) { + test_name = env.test->test_name; + subtest_name = env.subtest_state ? env.subtest_state->name : NULL; + netns_obj->tmon = traffic_monitor_start(nsname, test_name, subtest_name); + if (!netns_obj->tmon) { + fprintf(stderr, "Failed to start traffic monitor for %s\n", nsname); + goto fail; + } + } else { + netns_obj->tmon = NULL; + } + + if (open) { + netns_obj->nstoken = open_netns(nsname); + if (!netns_obj->nstoken) + goto fail; + } + + return netns_obj; +fail: + traffic_monitor_stop(netns_obj->tmon); + remove_netns(nsname); + free(netns_obj->nsname); + free(netns_obj); + return NULL; +} + +/* Delete the network namespace. + * + * This function should be paired with netns_new() to delete the namespace + * created by netns_new(). + */ +void netns_free(struct netns_obj *netns_obj) +{ + if (!netns_obj) + return; + traffic_monitor_stop(netns_obj->tmon); + close_netns(netns_obj->nstoken); + remove_netns(netns_obj->nsname); + free(netns_obj->nsname); + free(netns_obj); +} + /* extern declarations for test funcs */ #define DEFINE_TEST(name) \ extern void test_##name(void) __weak; \ @@ -653,7 +775,8 @@ enum ARG_KEYS { ARG_TEST_NAME_GLOB_DENYLIST = 'd', ARG_NUM_WORKERS = 'j', ARG_DEBUG = -1, - ARG_JSON_SUMMARY = 'J' + ARG_JSON_SUMMARY = 'J', + ARG_TRAFFIC_MONITOR = 'm', }; static const struct argp_option opts[] = { @@ -680,6 +803,10 @@ static const struct argp_option opts[] = { { "debug", ARG_DEBUG, NULL, 0, "print extra debug information for test_progs." 
}, { "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."}, +#ifdef TRAFFIC_MONITOR + { "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0, + "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." }, +#endif {}, }; @@ -848,6 +975,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) return -EINVAL; } } + env_verbosity = env->verbosity; if (verbose()) { if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) { @@ -891,6 +1019,18 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) break; case ARGP_KEY_END: break; +#ifdef TRAFFIC_MONITOR + case ARG_TRAFFIC_MONITOR: + if (arg[0] == '@') + err = parse_test_list_file(arg + 1, + &env->tmon_selector.whitelist, + true); + else + err = parse_test_list(arg, + &env->tmon_selector.whitelist, + true); + break; +#endif default: return ARGP_ERR_UNKNOWN; } @@ -1029,7 +1169,7 @@ void crash_handler(int signum) sz = backtrace(bt, ARRAY_SIZE(bt)); - if (env.stdout) + if (env.stdout_saved) stdio_restore(); if (env.test) { env.test_state->error_cnt++; @@ -1345,7 +1485,7 @@ static void calculate_summary_and_print_errors(struct test_env *env) if (env->json) { w = jsonw_new(env->json); if (!w) - fprintf(env->stderr, "Failed to create new JSON stream."); + fprintf(env->stderr_saved, "Failed to create new JSON stream."); } if (w) { @@ -1360,7 +1500,7 @@ static void calculate_summary_and_print_errors(struct test_env *env) /* * We only print error logs summary when there are failed tests and - * verbose mode is not enabled. Otherwise, results may be incosistent. + * verbose mode is not enabled. Otherwise, results may be inconsistent. * */ if (!verbose() && fail_cnt) { @@ -1694,8 +1834,8 @@ int main(int argc, char **argv) return -1; } - env.stdout = stdout; - env.stderr = stderr; + env.stdout_saved = stdout; + env.stderr_saved = stderr; env.has_testmod = true; if (!env.list_test_names) { @@ -1703,7 +1843,7 @@ int main(int argc, char **argv) unload_bpf_testmod(verbose()); if (load_bpf_testmod(verbose())) { - fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n"); + fprintf(env.stderr_saved, "WARNING! 
Selftests relying on bpf_testmod.ko will be skipped.\n"); env.has_testmod = false; } } @@ -1722,6 +1862,8 @@ int main(int argc, char **argv) test->test_num, test->test_name, test->test_name, test->test_name); exit(EXIT_ERR_SETUP_INFRA); } + if (test->should_run) + test->should_tmon = should_tmon(&env.tmon_selector, test->test_name); } /* ignore workers if we are just listing */ @@ -1731,7 +1873,7 @@ int main(int argc, char **argv) /* launch workers if requested */ env.worker_id = -1; /* main process */ if (env.workers) { - env.worker_pids = calloc(sizeof(__pid_t), env.workers); + env.worker_pids = calloc(sizeof(pid_t), env.workers); env.worker_socks = calloc(sizeof(int), env.workers); if (env.debug) fprintf(stdout, "Launching %d workers.\n", env.workers); @@ -1781,7 +1923,7 @@ int main(int argc, char **argv) } if (env.list_test_names) { - fprintf(env.stdout, "%s\n", test->test_name); + fprintf(env.stdout_saved, "%s\n", test->test_name); env.succ_cnt++; continue; } @@ -1806,6 +1948,7 @@ out: free_test_selector(&env.test_selector); free_test_selector(&env.subtest_selector); + free_test_selector(&env.tmon_selector); free_test_states(); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 51341d50213b..7767d9a825ae 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -74,8 +74,9 @@ struct subtest_state { int error_cnt; bool skipped; bool filtered; + bool should_tmon; - FILE *stdout; + FILE *stdout_saved; }; struct test_state { @@ -92,12 +93,15 @@ struct test_state { size_t log_cnt; char *log_buf; - FILE *stdout; + FILE *stdout_saved; }; +extern int env_verbosity; + struct test_env { struct test_selector test_selector; struct test_selector subtest_selector; + struct test_selector tmon_selector; bool verifier_stats; bool debug; enum verbosity verbosity; @@ -111,8 +115,8 @@ struct test_env { struct test_state *test_state; /* current running test state */ struct subtest_state *subtest_state; /* current running subtest state */ - FILE *stdout; - FILE *stderr; + FILE *stdout_saved; + FILE *stderr_saved; int nr_cpus; FILE *json; @@ -428,6 +432,10 @@ int write_sysctl(const char *sysctl, const char *value); int get_bpf_max_tramp_links_from(struct btf *btf); int get_bpf_max_tramp_links(void); +struct netns_obj; +struct netns_obj *netns_new(const char *name, bool open); +void netns_free(struct netns_obj *netns); + #ifdef __x86_64__ #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep" #elif defined(__s390x__) @@ -447,7 +455,6 @@ typedef int (*pre_execution_cb)(struct bpf_object *obj); struct test_loader { char *log_buf; size_t log_buf_sz; - size_t next_match_pos; pre_execution_cb pre_execution_cb; struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh deleted file mode 100755 index 515c2eafc97f..000000000000 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -# Copyright (c) 2018 Facebook - -set -eu - -wait_for_ip() -{ - local _i - echo -n "Wait for testing link-local IP to become available " - for _i in $(seq ${MAX_PING_TRIES}); do - echo -n "." - if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then - echo " OK" - return - fi - sleep 1 - done - echo 1>&2 "ERROR: Timeout waiting for test IP to become available." 
- exit 1 -} - -setup() -{ - # Create testing interfaces not to interfere with current environment. - ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER} - ip link set ${TEST_IF} up - ip link set ${TEST_IF_PEER} up - - wait_for_ip - - tc qdisc add dev ${TEST_IF} clsact - tc filter add dev ${TEST_IF} egress bpf obj ${BPF_PROG_OBJ} \ - sec ${BPF_PROG_SECTION} da - - BPF_PROG_ID=$(tc filter show dev ${TEST_IF} egress | \ - awk '/ id / {sub(/.* id /, "", $0); print($1)}') -} - -cleanup() -{ - ip link del ${TEST_IF} 2>/dev/null || : - ip link del ${TEST_IF_PEER} 2>/dev/null || : -} - -main() -{ - trap cleanup EXIT 2 3 6 15 - setup - ${PROG} ${TEST_IF} ${BPF_PROG_ID} -} - -DIR=$(dirname $0) -TEST_IF="test_cgid_1" -TEST_IF_PEER="test_cgid_2" -MAX_PING_TRIES=5 -BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.bpf.o" -BPF_PROG_SECTION="cgroup_id_logger" -BPF_PROG_ID=0 -PROG="${DIR}/test_skb_cgroup_id_user" -type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6" - -main diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c deleted file mode 100644 index ed518d075d1d..000000000000 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c +++ /dev/null @@ -1,183 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2018 Facebook - -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -#include <arpa/inet.h> -#include <net/if.h> -#include <netinet/in.h> -#include <sys/socket.h> -#include <sys/types.h> - - -#include <bpf/bpf.h> -#include <bpf/libbpf.h> - -#include "cgroup_helpers.h" - -#define CGROUP_PATH "/skb_cgroup_test" -#define NUM_CGROUP_LEVELS 4 - -/* RFC 4291, Section 2.7.1 */ -#define LINKLOCAL_MULTICAST "ff02::1" - -static int mk_dst_addr(const char *ip, const char *iface, - struct sockaddr_in6 *dst) -{ - memset(dst, 0, sizeof(*dst)); - - dst->sin6_family = AF_INET6; - dst->sin6_port = htons(1025); - - if (inet_pton(AF_INET6, ip, &dst->sin6_addr) != 1) { - log_err("Invalid IPv6: %s", ip); - return -1; - } - - dst->sin6_scope_id = if_nametoindex(iface); - if (!dst->sin6_scope_id) { - log_err("Failed to get index of iface: %s", iface); - return -1; - } - - return 0; -} - -static int send_packet(const char *iface) -{ - struct sockaddr_in6 dst; - char msg[] = "msg"; - int err = 0; - int fd = -1; - - if (mk_dst_addr(LINKLOCAL_MULTICAST, iface, &dst)) - goto err; - - fd = socket(AF_INET6, SOCK_DGRAM, 0); - if (fd == -1) { - log_err("Failed to create UDP socket"); - goto err; - } - - if (sendto(fd, &msg, sizeof(msg), 0, (const struct sockaddr *)&dst, - sizeof(dst)) == -1) { - log_err("Failed to send datagram"); - goto err; - } - - goto out; -err: - err = -1; -out: - if (fd >= 0) - close(fd); - return err; -} - -int get_map_fd_by_prog_id(int prog_id) -{ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - __u32 map_ids[1]; - int prog_fd = -1; - int map_fd = -1; - - prog_fd = bpf_prog_get_fd_by_id(prog_id); - if (prog_fd < 0) { - log_err("Failed to get fd by prog id %d", prog_id); - goto err; - } - - info.nr_map_ids = 1; - info.map_ids = (__u64) (unsigned long) map_ids; - - if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) { - log_err("Failed to get info by prog fd %d", prog_fd); - goto err; - } - - if (!info.nr_map_ids) { - log_err("No maps found for prog fd %d", prog_fd); - goto err; - } - - map_fd = bpf_map_get_fd_by_id(map_ids[0]); - if (map_fd < 0) - log_err("Failed to get fd by map id %d", map_ids[0]); -err: - if (prog_fd >= 0) - close(prog_fd); - return map_fd; -} - -int 
check_ancestor_cgroup_ids(int prog_id) -{ - __u64 actual_ids[NUM_CGROUP_LEVELS], expected_ids[NUM_CGROUP_LEVELS]; - __u32 level; - int err = 0; - int map_fd; - - expected_ids[0] = get_cgroup_id("/.."); /* root cgroup */ - expected_ids[1] = get_cgroup_id(""); - expected_ids[2] = get_cgroup_id(CGROUP_PATH); - expected_ids[3] = 0; /* non-existent cgroup */ - - map_fd = get_map_fd_by_prog_id(prog_id); - if (map_fd < 0) - goto err; - - for (level = 0; level < NUM_CGROUP_LEVELS; ++level) { - if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) { - log_err("Failed to lookup key %d", level); - goto err; - } - if (actual_ids[level] != expected_ids[level]) { - log_err("%llx (actual) != %llx (expected), level: %u\n", - actual_ids[level], expected_ids[level], level); - goto err; - } - } - - goto out; -err: - err = -1; -out: - if (map_fd >= 0) - close(map_fd); - return err; -} - -int main(int argc, char **argv) -{ - int cgfd = -1; - int err = 0; - - if (argc < 3) { - fprintf(stderr, "Usage: %s iface prog_id\n", argv[0]); - exit(EXIT_FAILURE); - } - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - cgfd = cgroup_setup_and_join(CGROUP_PATH); - if (cgfd < 0) - goto err; - - if (send_packet(argv[1])) - goto err; - - if (check_ancestor_cgroup_ids(atoi(argv[2]))) - goto err; - - goto out; -err: - err = -1; -out: - close(cgfd); - cleanup_cgroup_environment(); - printf("[%s]\n", err ? "FAIL" : "PASS"); - return err; -} diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh deleted file mode 100755 index 5211ca9a0239..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_veth.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -# -# Create 3 namespaces with 3 veth peers, and -# forward packets in-between using native XDP -# -# XDP_TX -# NS1(veth11) NS2(veth22) NS3(veth33) -# | | | -# | | | -# (veth1, (veth2, (veth3, -# id:111) id:122) id:133) -# ^ | ^ | ^ | -# | | XDP_REDIRECT | | XDP_REDIRECT | | -# | ------------------ ------------------ | -# ----------------------------------------- -# XDP_REDIRECT - -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - -TESTNAME=xdp_veth -BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts) -BPF_DIR=$BPF_FS/test_$TESTNAME -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" -readonly NS3="ns3-$(mktemp -u XXXXXX)" - -_cleanup() -{ - set +e - ip link del veth1 2> /dev/null - ip link del veth2 2> /dev/null - ip link del veth3 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null - ip netns del ${NS3} 2> /dev/null - rm -rf $BPF_DIR 2> /dev/null -} - -cleanup_skip() -{ - echo "selftests: $TESTNAME [SKIP]" - _cleanup - - exit $ksft_skip -} - -cleanup() -{ - if [ "$?" = 0 ]; then - echo "selftests: $TESTNAME [PASS]" - else - echo "selftests: $TESTNAME [FAILED]" - fi - _cleanup -} - -if [ $(id -u) -ne 0 ]; then - echo "selftests: $TESTNAME [SKIP] Need root privileges" - exit $ksft_skip -fi - -if ! ip link set dev lo xdp off > /dev/null 2>&1; then - echo "selftests: $TESTNAME [SKIP] Could not run test without the ip xdp support" - exit $ksft_skip -fi - -if [ -z "$BPF_FS" ]; then - echo "selftests: $TESTNAME [SKIP] Could not run test without bpffs mounted" - exit $ksft_skip -fi - -if ! 
bpftool version > /dev/null 2>&1; then - echo "selftests: $TESTNAME [SKIP] Could not run test without bpftool" - exit $ksft_skip -fi - -set -e - -trap cleanup_skip EXIT - -ip netns add ${NS1} -ip netns add ${NS2} -ip netns add ${NS3} - -ip link add veth1 index 111 type veth peer name veth11 netns ${NS1} -ip link add veth2 index 122 type veth peer name veth22 netns ${NS2} -ip link add veth3 index 133 type veth peer name veth33 netns ${NS3} - -ip link set veth1 up -ip link set veth2 up -ip link set veth3 up - -ip -n ${NS1} addr add 10.1.1.11/24 dev veth11 -ip -n ${NS3} addr add 10.1.1.33/24 dev veth33 - -ip -n ${NS1} link set dev veth11 up -ip -n ${NS2} link set dev veth22 up -ip -n ${NS3} link set dev veth33 up - -mkdir $BPF_DIR -bpftool prog loadall \ - xdp_redirect_map.bpf.o $BPF_DIR/progs type xdp \ - pinmaps $BPF_DIR/maps -bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0 -bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0 -bpftool map update pinned $BPF_DIR/maps/tx_port key 2 0 0 0 value 111 0 0 0 -ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0 -ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1 -ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2 - -ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.bpf.o sec xdp -ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.bpf.o sec xdp -ip -n ${NS3} link set dev veth33 xdp obj xdp_dummy.bpf.o sec xdp - -trap cleanup EXIT - -ip netns exec ${NS1} ping -c 1 -W 1 10.1.1.33 - -exit 0 diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index d5379a0e6da8..d3c3c3a24150 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -7,6 +7,7 @@ #include <errno.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> +#include "disasm.h" #include "test_progs.h" #include "testing_helpers.h" #include <linux/membarrier.h> @@ -220,13 +221,13 @@ int parse_test_list(const char *s, bool is_glob_pattern) { char *input, *state = NULL, *test_spec; - int err = 0; + int err = 0, cnt = 0; input = strdup(s); if (!input) return -ENOMEM; - while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) { + while ((test_spec = strtok_r(cnt++ ? 
NULL : input, ",", &state))) { err = insert_test(set, test_spec, is_glob_pattern); if (err) break; @@ -451,7 +452,7 @@ int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) *cnt = xlated_prog_len / buf_element_size; *buf = calloc(*cnt, buf_element_size); - if (!buf) { + if (!*buf) { perror("can't allocate xlated program buffer"); return -ENOMEM; } diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 465d196c7165..2d742fdac6b9 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -10,6 +10,8 @@ #include <pthread.h> #include <unistd.h> #include <linux/perf_event.h> +#include <linux/fs.h> +#include <sys/ioctl.h> #include <sys/mman.h> #include "trace_helpers.h" #include <linux/limits.h> @@ -244,29 +246,91 @@ out: return err; } +#ifdef PROCMAP_QUERY +int env_verbosity __weak = 0; + +static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags) +{ + char path_buf[PATH_MAX], build_id_buf[20]; + struct procmap_query q; + int err; + + memset(&q, 0, sizeof(q)); + q.size = sizeof(q); + q.query_flags = query_flags; + q.query_addr = (__u64)addr; + q.vma_name_addr = (__u64)path_buf; + q.vma_name_size = sizeof(path_buf); + q.build_id_addr = (__u64)build_id_buf; + q.build_id_size = sizeof(build_id_buf); + + err = ioctl(fd, PROCMAP_QUERY, &q); + if (err < 0) { + err = -errno; + if (err == -ENOTTY) + return -EOPNOTSUPP; /* ioctl() not implemented yet */ + if (err == -ENOENT) + return -ESRCH; /* vma not found */ + return err; + } + + if (env_verbosity >= 1) { + printf("VMA FOUND (addr %08lx): %08lx-%08lx %c%c%c%c %08lx %02x:%02x %ld %s (build ID: %s, %d bytes)\n", + (long)addr, (long)q.vma_start, (long)q.vma_end, + (q.vma_flags & PROCMAP_QUERY_VMA_READABLE) ? 'r' : '-', + (q.vma_flags & PROCMAP_QUERY_VMA_WRITABLE) ? 'w' : '-', + (q.vma_flags & PROCMAP_QUERY_VMA_EXECUTABLE) ? 'x' : '-', + (q.vma_flags & PROCMAP_QUERY_VMA_SHARED) ? 's' : 'p', + (long)q.vma_offset, q.dev_major, q.dev_minor, (long)q.inode, + q.vma_name_size ? path_buf : "", + q.build_id_size ? 
"YES" : "NO", + q.build_id_size); + } + + *start = q.vma_start; + *offset = q.vma_offset; + *flags = q.vma_flags; + return 0; +} +#else +static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags) +{ + return -EOPNOTSUPP; +} +#endif + ssize_t get_uprobe_offset(const void *addr) { - size_t start, end, base; - char buf[256]; - bool found = false; + size_t start, base, end; FILE *f; + char buf[256]; + int err, flags; f = fopen("/proc/self/maps", "r"); if (!f) return -errno; - while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) { - if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) { - found = true; - break; + /* requested executable VMA only */ + err = procmap_query(fileno(f), addr, PROCMAP_QUERY_VMA_EXECUTABLE, &start, &base, &flags); + if (err == -EOPNOTSUPP) { + bool found = false; + + while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) { + if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) { + found = true; + break; + } + } + if (!found) { + fclose(f); + return -ESRCH; } + } else if (err) { + fclose(f); + return err; } - fclose(f); - if (!found) - return -ESRCH; - #if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2 #define OP_RT_RA_MASK 0xffff0000UL @@ -307,15 +371,25 @@ ssize_t get_rel_offset(uintptr_t addr) size_t start, end, offset; char buf[256]; FILE *f; + int err, flags; f = fopen("/proc/self/maps", "r"); if (!f) return -errno; - while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) { - if (addr >= start && addr < end) { - fclose(f); - return (size_t)addr - start + offset; + err = procmap_query(fileno(f), (const void *)addr, 0, &start, &offset, &flags); + if (err == 0) { + fclose(f); + return (size_t)addr - start + offset; + } else if (err != -EOPNOTSUPP) { + fclose(f); + return err; + } else if (err) { + while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) { + if (addr >= start && addr < end) { + fclose(f); + return (size_t)addr - start + offset; + } } } diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c index b6d016461fb0..220f6a963813 100644 --- a/tools/testing/selftests/bpf/unpriv_helpers.c +++ b/tools/testing/selftests/bpf/unpriv_helpers.c @@ -2,7 +2,6 @@ #include <stdbool.h> #include <stdlib.h> -#include <error.h> #include <stdio.h> #include <string.h> #include <unistd.h> diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c index 7ffa563ffeba..c7828b13e5ff 100644 --- a/tools/testing/selftests/bpf/uprobe_multi.c +++ b/tools/testing/selftests/bpf/uprobe_multi.c @@ -2,8 +2,21 @@ #include <stdio.h> #include <string.h> +#include <stdbool.h> +#include <stdint.h> +#include <sys/mman.h> +#include <unistd.h> #include <sdt.h> +#ifndef MADV_POPULATE_READ +#define MADV_POPULATE_READ 22 +#endif + +int __attribute__((weak)) uprobe(void) +{ + return 0; +} + #define __PASTE(a, b) a##b #define PASTE(a, b) __PASTE(a, b) @@ -75,6 +88,30 @@ static int usdt(void) return 0; } +extern char build_id_start[]; +extern char build_id_end[]; + +int __attribute__((weak)) trigger_uprobe(bool build_id_resident) +{ + int page_sz = sysconf(_SC_PAGESIZE); + void *addr; + + /* page-align build ID start */ + addr = (void *)((uintptr_t)&build_id_start & ~(page_sz - 1)); + + /* to guarantee MADV_PAGEOUT work reliably, we need to ensure that + * memory range is mapped into current process, so we 
unconditionally + * do MADV_POPULATE_READ, and then MADV_PAGEOUT, if necessary + */ + madvise(addr, page_sz, MADV_POPULATE_READ); + if (!build_id_resident) + madvise(addr, page_sz, MADV_PAGEOUT); + + (void)uprobe(); + + return 0; +} + int main(int argc, char **argv) { if (argc != 2) @@ -84,6 +121,10 @@ int main(int argc, char **argv) return bench(); if (!strcmp("usdt", argv[1])) return usdt(); + if (!strcmp("uprobe-paged-out", argv[1])) + return trigger_uprobe(false /* page-out build ID */); + if (!strcmp("uprobe-paged-in", argv[1])) + return trigger_uprobe(true /* page-in build ID */); error: fprintf(stderr, "usage: %s <bench|usdt>\n", argv[0]); diff --git a/tools/testing/selftests/bpf/uprobe_multi.ld b/tools/testing/selftests/bpf/uprobe_multi.ld new file mode 100644 index 000000000000..a2e94828bc8c --- /dev/null +++ b/tools/testing/selftests/bpf/uprobe_multi.ld @@ -0,0 +1,11 @@ +SECTIONS +{ + . = ALIGN(4096); + .note.gnu.build-id : { *(.note.gnu.build-id) } + . = ALIGN(4096); +} +INSERT AFTER .text; + +build_id_start = ADDR(.note.gnu.build-id); +build_id_end = ADDR(.note.gnu.build-id) + SIZEOF(.note.gnu.build-id); + diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index d0cdd156cd55..7afc2619ab14 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -76,7 +76,7 @@ }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "arg#0 expected pointer to ctx, but got PTR", + .errstr = "arg#0 expected pointer to ctx, but got fp", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_pass_ctx", 2 }, }, diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index d25c3e9605f1..f420c0312aa0 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -153,7 +153,7 @@ .result = REJECT, .errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed", }, -/* Tests for unreferened PTR_TO_BTF_ID */ +/* Tests for unreferenced PTR_TO_BTF_ID */ { "map_kptr: unref: reject btf_struct_ids_match == false", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 90643ccc221d..59a020c35647 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -39,11 +39,11 @@ .result = VERBOSE_ACCEPT, .errstr = "mark_precise: frame0: last_idx 26 first_idx 20\ - mark_precise: frame0: regs=r2,r9 stack= before 25\ - mark_precise: frame0: regs=r2,r9 stack= before 24\ - mark_precise: frame0: regs=r2,r9 stack= before 23\ - mark_precise: frame0: regs=r2,r9 stack= before 22\ - mark_precise: frame0: regs=r2,r9 stack= before 20\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: regs=r2 stack= before 20\ mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 19 first_idx 10\ mark_precise: frame0: regs=r2,r9 stack= before 19\ @@ -100,13 +100,13 @@ .errstr = "26: (85) call bpf_probe_read_kernel#113\ mark_precise: frame0: last_idx 26 first_idx 22\ - mark_precise: frame0: regs=r2,r9 stack= before 25\ - mark_precise: frame0: regs=r2,r9 stack= before 24\ - mark_precise: frame0: regs=r2,r9 stack= before 23\ - mark_precise: frame0: regs=r2,r9 stack= before 22\ - mark_precise: 
frame0: parent state regs=r2,r9 stack=:\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: parent state regs=r2 stack=:\ mark_precise: frame0: last_idx 20 first_idx 20\ - mark_precise: frame0: regs=r2,r9 stack= before 20\ + mark_precise: frame0: regs=r2 stack= before 20\ mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 19 first_idx 17\ mark_precise: frame0: regs=r2,r9 stack= before 19\ @@ -183,10 +183,10 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, .errstr = "mark_precise: frame0: last_idx 7 first_idx 7\ - mark_precise: frame0: parent state regs=r4 stack=-8:\ + mark_precise: frame0: parent state regs=r4 stack=:\ mark_precise: frame0: last_idx 6 first_idx 4\ - mark_precise: frame0: regs=r4 stack=-8 before 6: (b7) r0 = -1\ - mark_precise: frame0: regs=r4 stack=-8 before 5: (79) r4 = *(u64 *)(r10 -8)\ + mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\ + mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\ mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\ mark_precise: frame0: parent state regs=r0 stack=:\ mark_precise: frame0: last_idx 3 first_idx 3\ diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index b2854238d4a0..1ec5c4c47235 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #define _GNU_SOURCE #include <argp.h> +#include <libgen.h> #include <string.h> #include <stdlib.h> #include <sched.h> @@ -784,13 +785,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs) static int parse_stats(const char *stats_str, struct stat_specs *specs) { char *input, *state = NULL, *next; - int err; + int err, cnt = 0; input = strdup(stats_str); if (!input) return -ENOMEM; - while ((next = strtok_r(state ? NULL : input, ",", &state))) { + while ((next = strtok_r(cnt++ ? 
NULL : input, ",", &state))) { err = parse_stat(next, specs); if (err) { free(input); @@ -988,8 +989,8 @@ skip_freplace_fixup: static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog) { + const char *base_filename = basename(strdupa(filename)); const char *prog_name = bpf_program__name(prog); - const char *base_filename = basename(filename); char *buf; int buf_sz, log_level; struct verif_stats *stats; @@ -1056,13 +1057,14 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf static int process_obj(const char *filename) { + const char *base_filename = basename(strdupa(filename)); struct bpf_object *obj = NULL, *tobj; struct bpf_program *prog, *tprog, *lprog; libbpf_print_fn_t old_libbpf_print_fn; LIBBPF_OPTS(bpf_object_open_opts, opts); int err = 0, prog_cnt = 0; - if (!should_process_file_prog(basename(filename), NULL)) { + if (!should_process_file_prog(base_filename, NULL)) { if (env.verbose) printf("Skipping '%s' due to filters...\n", filename); env.files_skipped++; @@ -1076,7 +1078,7 @@ static int process_obj(const char *filename) } if (!env.quiet && env.out_fmt == RESFMT_TABLE) - printf("Processing '%s'...\n", basename(filename)); + printf("Processing '%s'...\n", base_filename); old_libbpf_print_fn = libbpf_set_print(libbpf_print_fn); obj = bpf_object__open_file(filename, &opts); @@ -1493,7 +1495,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, while (fgets(line, sizeof(line), f)) { char *input = line, *state = NULL, *next; struct verif_stats *st = NULL; - int col = 0; + int col = 0, cnt = 0; if (!header) { void *tmp; @@ -1511,7 +1513,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, *stat_cntp += 1; } - while ((next = strtok_r(state ? NULL : input, ",\n", &state))) { + while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) { if (header) { /* for the first line, set up spec stats */ err = parse_stat(next, specs); diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh index 65d14f3bbe30..79505d294c44 100755 --- a/tools/testing/selftests/bpf/vmtest.sh +++ b/tools/testing/selftests/bpf/vmtest.sh @@ -1,31 +1,47 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -set -u set -e -# This script currently only works for x86_64 and s390x, as -# it is based on the VM image used by the BPF CI, which is -# available only for these architectures. -ARCH="$(uname -m)" -case "${ARCH}" in +# This script currently only works for the following platforms, +# as it is based on the VM image used by the BPF CI, which is +# available only for these architectures. 
We can also specify +# the local rootfs image generated by the following script: +# https://github.com/libbpf/ci/blob/main/rootfs/mkrootfs_debian.sh +PLATFORM="${PLATFORM:-$(uname -m)}" +case "${PLATFORM}" in s390x) QEMU_BINARY=qemu-system-s390x QEMU_CONSOLE="ttyS1" - QEMU_FLAGS=(-smp 2) + HOST_FLAGS=(-smp 2 -enable-kvm) + CROSS_FLAGS=(-smp 2) BZIMAGE="arch/s390/boot/vmlinux" + ARCH="s390" ;; x86_64) QEMU_BINARY=qemu-system-x86_64 QEMU_CONSOLE="ttyS0,115200" - QEMU_FLAGS=(-cpu host -smp 8) + HOST_FLAGS=(-cpu host -enable-kvm -smp 8) + CROSS_FLAGS=(-smp 8) BZIMAGE="arch/x86/boot/bzImage" + ARCH="x86" ;; aarch64) QEMU_BINARY=qemu-system-aarch64 QEMU_CONSOLE="ttyAMA0,115200" - QEMU_FLAGS=(-M virt,gic-version=3 -cpu host -smp 8) + HOST_FLAGS=(-M virt,gic-version=3 -cpu host -enable-kvm -smp 8) + CROSS_FLAGS=(-M virt,gic-version=3 -cpu cortex-a76 -smp 8) BZIMAGE="arch/arm64/boot/Image" + ARCH="arm64" + ;; +riscv64) + # required qemu version v7.2.0+ + QEMU_BINARY=qemu-system-riscv64 + QEMU_CONSOLE="ttyS0,115200" + HOST_FLAGS=(-M virt -cpu host -enable-kvm -smp 8) + CROSS_FLAGS=(-M virt -cpu rv64,sscofpmf=true -smp 8) + BZIMAGE="arch/riscv/boot/Image" + ARCH="riscv" ;; *) echo "Unsupported architecture" @@ -34,11 +50,12 @@ aarch64) esac DEFAULT_COMMAND="./test_progs" MOUNT_DIR="mnt" +LOCAL_ROOTFS_IMAGE="" ROOTFS_IMAGE="root.img" OUTPUT_DIR="$HOME/.bpf_selftests" KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.vm" - "tools/testing/selftests/bpf/config.${ARCH}") + "tools/testing/selftests/bpf/config.${PLATFORM}") INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX" NUM_COMPILE_JOBS="$(nproc)" LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")" @@ -58,6 +75,10 @@ tools/testing/selftests/bpf. e.g: If no command is specified and a debug shell (-s) is not requested, "${DEFAULT_COMMAND}" will be run by default. +Using PLATFORM= and CROSS_COMPILE= options will enable cross platform testing: + + PLATFORM=<platform> CROSS_COMPILE=<toolchain> $0 -- ./test_progs -t test_lsm + If you build your kernel using KBUILD_OUTPUT= or O= options, these can be passed as environment variables to the script: @@ -69,6 +90,7 @@ or Options: + -l) Specify the path to the local rootfs image. -i) Update the rootfs image with a newer version. -d) Update the output directory (default: ${OUTPUT_DIR}) -j) Number of jobs for compilation, similar to -j in make @@ -92,24 +114,11 @@ populate_url_map() fi } -download() -{ - local file="$1" - - if [[ ! -v URLS[$file] ]]; then - echo "$file not found" >&2 - return 1 - fi - - echo "Downloading $file..." >&2 - curl -Lsf "${URLS[$file]}" "${@:2}" -} - newest_rootfs_version() { { for file in "${!URLS[@]}"; do - if [[ $file =~ ^"${ARCH}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then + if [[ $file =~ ^"${PLATFORM}"/libbpf-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then echo "${BASH_REMATCH[1]}" fi done @@ -118,16 +127,34 @@ newest_rootfs_version() download_rootfs() { - local rootfsversion="$1" - local dir="$2" + populate_url_map + + local rootfsversion="$(newest_rootfs_version)" + local file="${PLATFORM}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst" + + if [[ ! -v URLS[$file] ]]; then + echo "$file not found" >&2 + return 1 + fi + + echo "Downloading $file..." >&2 + curl -Lsf "${URLS[$file]}" "${@:2}" +} + +load_rootfs() +{ + local dir="$1" if ! 
which zstd &> /dev/null; then echo 'Could not find "zstd" on the system, please install zstd' exit 1 fi - download "${ARCH}/libbpf-vmtest-rootfs-$rootfsversion.tar.zst" | - zstd -d | sudo tar -C "$dir" -x + if [[ -n "${LOCAL_ROOTFS_IMAGE}" ]]; then + cat "${LOCAL_ROOTFS_IMAGE}" | zstd -d | sudo tar -C "$dir" -x + else + download_rootfs | zstd -d | sudo tar -C "$dir" -x + fi } recompile_kernel() @@ -227,7 +254,7 @@ create_vm_image() mkfs.ext4 -q "${rootfs_img}" mount_image - download_rootfs "$(newest_rootfs_version)" "${mount_dir}" + load_rootfs "${mount_dir}" unmount_image } @@ -244,12 +271,17 @@ EOF exit 1 fi + if [[ "${PLATFORM}" != "$(uname -m)" ]]; then + QEMU_FLAGS=("${CROSS_FLAGS[@]}") + else + QEMU_FLAGS=("${HOST_FLAGS[@]}") + fi + ${QEMU_BINARY} \ -nodefaults \ -display none \ -serial mon:stdio \ "${QEMU_FLAGS[@]}" \ - -enable-kvm \ -m 4G \ -drive file="${rootfs_img}",format=raw,index=1,media=disk,if=virtio,cache=none \ -kernel "${kernel_bzimage}" \ @@ -341,8 +373,11 @@ main() local exit_command="poweroff -f" local debug_shell="no" - while getopts ':hskid:j:' opt; do + while getopts ':hskl:id:j:' opt; do case ${opt} in + l) + LOCAL_ROOTFS_IMAGE="$OPTARG" + ;; i) update_image="yes" ;; @@ -377,6 +412,11 @@ main() trap 'catch "$?"' EXIT + if [[ "${PLATFORM}" != "$(uname -m)" ]] && [[ -z "${CROSS_COMPILE}" ]]; then + echo "Cross-platform testing needs to specify CROSS_COMPILE" + exit 1 + fi + if [[ $# -eq 0 && "${debug_shell}" == "no" ]]; then echo "No command specified, will run ${DEFAULT_COMMAND} in the vm" else @@ -384,7 +424,8 @@ main() fi local kconfig_file="${OUTPUT_DIR}/latest.config" - local make_command="make -j ${NUM_COMPILE_JOBS} KCONFIG_CONFIG=${kconfig_file}" + local make_command="make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} \ + -j ${NUM_COMPILE_JOBS} KCONFIG_CONFIG=${kconfig_file}" # Figure out where the kernel is being built. # O takes precedence over KBUILD_OUTPUT. @@ -402,8 +443,6 @@ main() make_command="${make_command} KBUILD_OUTPUT=${KBUILD_OUTPUT}" fi - populate_url_map - local rootfs_img="${OUTPUT_DIR}/${ROOTFS_IMAGE}" local mount_dir="${OUTPUT_DIR}/${MOUNT_DIR}" diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 1ee0ef114f9d..11f047b8af75 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -90,6 +90,7 @@ #include <signal.h> #include <stdio.h> #include <stdlib.h> +#include <libgen.h> #include <string.h> #include <stddef.h> #include <sys/mman.h> |
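
Note: the netns_new()/netns_free() helpers and the -m/--traffic-monitor option added above are intended to be used together from individual selftests. A minimal, illustrative sketch follows; it is not part of this patch, and the test name, namespace name and ASSERT_OK_PTR() usage are assumptions based only on the declarations added to test_progs.h:

#include "test_progs.h"

/* Hypothetical test: run as "./test_progs -t netns_example -m netns_example"
 * so that the -m pattern also selects this test for packet capture.
 */
void test_netns_example(void)
{
	struct netns_obj *ns;

	/* Create the namespace and switch into it (open == true); if this
	 * test matches the -m pattern, a traffic monitor is started for it.
	 */
	ns = netns_new("ns_example", true);
	if (!ASSERT_OK_PTR(ns, "netns_new"))
		return;

	/* ... exercise the networking BPF program under test here ... */

	/* Restore the original namespace, stop the traffic monitor and
	 * delete the namespace.
	 */
	netns_free(ns);
}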